# Load packages (order matters: later loads mask earlier ones — e.g. lmerTest
# must come after lme4 so that lmer() returns lmerModLmerTest objects with
# Satterthwaite p-values, as seen in the model summaries below).
library(tidyverse)
library(janitor)
library(cowplot)
library(here)
library(readxl)
library(Matrix)
library(lme4)
library(lmerTest)
library(TOSTER)
library(wesanderson)
library(gghalves)
library(car)
library(multilevelTools)
library(JWileymisc)
library(broom.mixed)
library(papaja)
library(effectsize)
library(ggimage)
library(sessioninfo)
library(broom)
# Project-local helper scripts (ICC computation and shared utilities)
source("compute_iccs.R")
source("common.R")
# Default ggplot theme for all figures
theme_set(theme_cowplot())
# Chunk defaults. NOTE: the original also passed `warn = FALSE`, but `warn` is
# not a knitr chunk option — the option that suppresses R warnings in rendered
# output is `warning`, which is already set — so the stray option was removed.
knitr::opts_chunk$set(cache = FALSE, warning = FALSE, message = FALSE)
# set paths ----
# All processed data files live in one directory; build that path once and
# join the individual file names onto it.
processed_data_dir <- here::here("..", "..", "data", "processed_data")
data_file_path <- file.path(processed_data_dir, "CATegories_exp2_processed_data_with_exclusion_info.csv")
useable_trial_summary_path <- file.path(processed_data_dir, "CATegories_exp2_useable_trial_summary.csv")
summarize_subj_trials_path <- file.path(processed_data_dir, "CATegories_exp2_trial_summary_data.csv")
participant_data_path <- file.path(processed_data_dir, "CATegories_exp2_processed_participant_data_anonymized.csv")
resampled_data_file_path <- file.path(processed_data_dir, "CATegories_exp2_processed_data_resampled.csv")
# load data ----
d <- read_csv(data_file_path)
participant_total_useable_trials <- read_csv(useable_trial_summary_path)
summarize_subj_trials <- read_csv(summarize_subj_trials_path)
participant_data <- read_csv(participant_data_path)
# note that this file gets created in cluster_permutation_analysis.Rmd
d_resampled <- read_csv(resampled_data_file_path)
In order for a trial to be included, participants must contribute at least 50% looking during the windows of interest when computing baseline-corrected proportion target looking: the critical window (300 ms - 2800 ms relative to target word onset) and the baseline window (-2000 ms - 0 ms relative to target word onset). See 2_process_exclusions.Rmd for detailed processing steps.
Overall, among the trials contributed by 141 participants (excluding 2 participants whose data was filtered out earlier in the process, because they either did not provide any useable trials or due to parent interference throughout the session), 80.9% of trials contained sufficient looking to meet our trial-level inclusion criteria (at least 50% looking during both the baseline window and the critical window). Overall, after additional trial-level exclusions (due to technical errors or frame rate issues), 78.3% were retained. 84 of the 141 participants contributed valid data on at least half of the experimental trials (for all participants: M = 25.9; only for included participants: M = 36.8).
# Count unique participants and sessions in the participant-level data.
participant_data_summarized <- participant_data %>%
  summarize(
    N = n_distinct(sub_num),           # unique participants
    num_sessions = sum(session < 3),   # total sessions (sessions coded 1 or 2)
    session_1_N = sum(session == 1),
    session_2_N = sum(session == 2)
  )
participant_data_summarized %>%
  knitr::kable()
| N | num_sessions | session_1_N | session_2_N |
|---|---|---|---|
| 143 | 255 | 143 | 112 |
# One row per participant with session-2 availability, joined to the
# per-participant usable-trial summary (which carries exclude_participant).
participant_data_summarized <- participant_data %>%
  distinct(sub_num, session_1_data, session_2_data) %>%
  left_join(participant_total_useable_trials)
# participants without session 2 data who were subsequently excluded
participant_data_summarized %>%
  filter(session_2_data == "N") %>%
  pull(exclude_participant) %>%
  sum()
## [1] 27
# participants with session 2 data who were subsequently excluded
participant_data_summarized %>%
  filter(session_2_data == "Y") %>%
  pull(exclude_participant) %>%
  sum()
## [1] 32
# summarize subject info ----
# One row per participant x session, with mean-centered age covariates.
subj_info_multisession <- d %>%
  distinct(sub_num, age, age_mo, child_gender, trial_order) %>%
  mutate(
    age_mo_c = age_mo - mean(age_mo),
    age_c = age - mean(age)
  )
# Participant counts overall and by reported gender ("f" = female)
subj_info <- d %>%
  distinct(sub_num, child_gender) %>%
  summarize(
    N = n(),
    N_female = sum(child_gender == "f")
  )
# Session-level age summaries combined with participant counts.
# NOTE(review): left_join() without `by` joins on all shared columns — both
# tables contain N here; verify the participant counts always agree so the
# join matches (they do in the rendered output).
overall_subj_info <- subj_info_multisession %>%
  summarize(
    N = n_distinct(sub_num),
    sessions = n(),
    mean_age = mean(age_mo),
    min_age = min(age),
    max_age = max(age),
    sd_age = sd(age_mo)
  ) %>%
  left_join(subj_info)
overall_subj_info %>%
  knitr::kable()
| N | sessions | mean_age | min_age | max_age | sd_age | N_female |
|---|---|---|---|---|---|---|
| 141 | 248 | 15.73065 | 410 | 718 | 1.613515 | 70 |
# subjects with usable trial data only ----
# Same summaries as above, restricted to non-excluded participants.
subj_info_multisession_usable_trials <- d %>%
  filter(exclude_participant == 0) %>%
  distinct(sub_num, age, age_mo, child_gender, trial_order) %>%
  mutate(
    age_mo_c = age_mo - mean(age_mo),
    age_c = age - mean(age)
  )
subj_info_usable_trials <- d %>%
  filter(exclude_participant == 0, useable_window == 1) %>%
  distinct(sub_num, child_gender) %>%
  summarize(
    N = n(),
    N_female = sum(child_gender == "f")
  )
# NOTE(review): as above, left_join() without `by` joins on the shared column
# N; confirm the two participant counts always match.
overall_subj_info_usable_trials <- subj_info_multisession_usable_trials %>%
  summarize(
    N = n_distinct(sub_num),
    sessions = n(),
    mean_age = mean(age_mo),
    mean_age_days = mean(age),
    min_age = min(age),
    max_age = max(age),
    sd_age = sd(age_mo)
  ) %>%
  left_join(subj_info_usable_trials)
overall_subj_info_usable_trials %>%
  knitr::kable()
| N | sessions | mean_age | mean_age_days | min_age | max_age | sd_age | N_female |
|---|---|---|---|---|---|---|---|
| 84 | 164 | 15.70122 | 477.8963 | 410 | 574 | 1.490131 | 51 |
Next, we summarize demographic information for the final sample (removing participant exclusions).
# Demographics for the final sample (participant exclusions removed).
demographics_summary <- d %>%
  filter(exclude_participant == 0) %>%
  distinct(
    sub_num,
    demographic_us_race_ethnicity_identification,
    demographic_education_level,
    demographic_annual_income,
    demographic_country,
    demographic_state,
    demographic_density
  )
# Race/ethnicity responses that name more than one category
multiple_categories_list <- c(
  "White, Middle Eastern or North African",
  "White, Hispanic, Latino, or Spanish origin",
  "White, Black or African American, Middle Eastern or North African",
  "White, Black or African American",
  "White, Asian, Middle Eastern or North African",
  "White, Asian",
  "Hispanic, Latino, or Spanish origin, Black or African American",
  "Hispanic, Latino, or Spanish origin, Asian",
  "Black or African American, Asian",
  "Asian, Middle Eastern or North African"
)
# Count participants per race/ethnicity response; flag multi-category responses.
race_ethnicity <- demographics_summary %>%
  count(demographic_us_race_ethnicity_identification, name = "N") %>%
  mutate(
    percent = N / sum(N),
    multiple_categories = ifelse(
      demographic_us_race_ethnicity_identification %in% multiple_categories_list,
      1, 0
    )
  )
# percent reporting multiple categories
sum(filter(race_ethnicity, multiple_categories == 1)$N) / sum(race_ethnicity$N)
## [1] 0.2261905
# quick visualization of race/ethnicity responses
ggplot(demographics_summary, aes(y = demographic_us_race_ethnicity_identification)) +
  geom_bar()
# Participant counts per reported annual income bracket
income <- demographics_summary %>%
  count(demographic_annual_income, name = "N")
income %>%
  knitr::kable()
| demographic_annual_income | N |
|---|---|
| 100000 | 6 |
| 110000 | 1 |
| 120000 | 5 |
| 130000 | 4 |
| 140000 | 1 |
| 15000 | 1 |
| 150000 | 11 |
| 160000 | 1 |
| 170000 | 1 |
| 180000 | 1 |
| 190000 | 1 |
| 20000 | 1 |
| 30000 | 1 |
| 40000 | 4 |
| 50000 | 7 |
| 60000 | 5 |
| 70000 | 4 |
| 80000 | 5 |
| 90000 | 3 |
| >200000 | 19 |
| NA | 2 |
# quick visualization of income brackets
ggplot(demographics_summary, aes(demographic_annual_income)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 90))
# Participant counts per education level
education <- demographics_summary %>%
  count(demographic_education_level, name = "N")
education %>%
  knitr::kable()
| demographic_education_level | N |
|---|---|
| assoc | 1 |
| bach | 31 |
| col | 2 |
| grad | 3 |
| hs | 1 |
| prof | 46 |
# Participant counts per US state, plus a quick visualization
states <- demographics_summary %>%
  count(demographic_state, name = "N")
ggplot(demographics_summary, aes(demographic_state)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 90))
# Participant counts per self-reported population density
population_density <- demographics_summary %>%
  count(demographic_density, name = "N")
population_density %>%
  knitr::kable()
| demographic_density | N |
|---|---|
| rural | 4 |
| suburban | 57 |
| urban | 22 |
| NA | 1 |
Here, we summarize each participant’s average accuracy during the critical window and average baseline-corrected proportion target looking.
# save key columns ----
key_cols_summarized_trial_data <- c(
  # identifiers / design
  "sub_num", "session", "age", "age_mo", "days_between_sessions",
  "child_gender", "trial_order", "trial_number", "condition",
  "target_image", "distractor_image", "target_category", "distractor_category",
  # typicality measures
  "target_typicality_z", "distractor_typicality_z",
  "target_parent_typicality_rating", "distractor_parent_typicality_rating",
  "target_parent_typicality_rating_z", "distractor_parent_typicality_rating_z",
  "target_parent_typicality_by_category_z", "distractor_parent_typicality_by_category_z",
  # summarized looking measures
  "mean_target_looking_critical", "mean_target_looking_baseline",
  "corrected_target_looking",
  # exclusion flags
  "exclude_participant", "age_exclusion", "trial_exclusion",
  "trial_exclusion_reason", "exclude_technical_issue", "exclude_frame_rate",
  "useable_window", "useable_critical_window", "useable_baseline_window",
  # short-window variants
  "useable_window_short", "total_trials_short",
  "exclude_participant_insufficient_data_short",
  "mean_target_looking_critical_short", "corrected_target_looking_short"
)
# extract summarized trial-level accuracy
# (see 2_process_exclusions.Rmd for details on how summarized columns are computed)
trial_corrected_accuracy_all <- d %>%
  select(all_of(key_cols_summarized_trial_data)) %>%
  distinct()
# apply participant- and trial-level exclusions
trial_corrected_accuracy <- trial_corrected_accuracy_all %>%
  filter(exclude_participant == 0, trial_exclusion == 0)
# summarize average accuracy ----
# Per-participant means with 95% CIs (t-based) for baseline-corrected and
# raw critical-window target looking.
avg_corrected_target_looking <- trial_corrected_accuracy %>%
  group_by(sub_num) %>%
  summarize(
    N = n(),
    mean_age = mean(age),
    mean_age_mo = mean(age_mo),
    average_corrected_target_looking = mean(corrected_target_looking, na.rm = TRUE),
    ci = qt(0.975, N - 1) * sd(corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = average_corrected_target_looking - ci,
    upper_ci = average_corrected_target_looking + ci,
    average_critical_window_looking = mean(mean_target_looking_critical, na.rm = TRUE),
    critical_window_ci = qt(0.975, N - 1) * sd(mean_target_looking_critical, na.rm = TRUE) / sqrt(N),
    critical_window_lower_ci = average_critical_window_looking - critical_window_ci,
    critical_window_upper_ci = average_critical_window_looking + critical_window_ci
  )
# baseline-corrected target looking summarized overall (grand mean across
# participants, with a 95% CI over participant means)
overall_corrected_target_looking <- avg_corrected_target_looking %>%
  summarize(
    N = n(),
    corrected_target_looking = mean(average_corrected_target_looking, na.rm = TRUE),
    ci = qt(0.975, N - 1) * sd(average_corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = corrected_target_looking - ci,
    upper_ci = corrected_target_looking + ci
  )
overall_corrected_target_looking %>%
  knitr::kable()
| N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|
| 84 | 0.0711788 | 0.0168447 | 0.0543341 | 0.0880235 |
We summarize each participant’s average accuracy during the critical window and average baseline-corrected proportion target looking, split by typicality condition.
# summarize average accuracy within participant, by typicality condition ----
avg_corrected_target_looking_by_typicality <- trial_corrected_accuracy %>%
  group_by(sub_num, condition) %>%
  summarize(
    N = n(),
    mean_age = mean(age),
    mean_age_mo = mean(age_mo),
    average_corrected_target_looking = mean(corrected_target_looking, na.rm = TRUE),
    se = sd(corrected_target_looking, na.rm = TRUE) / sqrt(N),
    ci = qt(0.975, N - 1) * sd(corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = average_corrected_target_looking - ci,
    upper_ci = average_corrected_target_looking + ci,
    lower_se = average_corrected_target_looking - se,
    upper_se = average_corrected_target_looking + se,
    average_critical_window_looking = mean(mean_target_looking_critical, na.rm = TRUE),
    critical_window_ci = qt(0.975, N - 1) * sd(mean_target_looking_critical, na.rm = TRUE) / sqrt(N),
    critical_window_lower_ci = average_critical_window_looking - critical_window_ci,
    critical_window_upper_ci = average_critical_window_looking + critical_window_ci
  )
# baseline-corrected target looking summarized overall, by condition
overall_corrected_target_looking_by_typicality <- avg_corrected_target_looking_by_typicality %>%
  group_by(condition) %>%
  summarize(
    N = n(),
    corrected_target_looking = mean(average_corrected_target_looking, na.rm = TRUE),
    ci = qt(0.975, N - 1) * sd(average_corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = corrected_target_looking - ci,
    upper_ci = corrected_target_looking + ci
  )
overall_corrected_target_looking_by_typicality %>%
  knitr::kable()
| condition | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|
| atypical | 84 | 0.0647849 | 0.0193507 | 0.0454342 | 0.0841356 |
| typical | 84 | 0.0795095 | 0.0218702 | 0.0576393 | 0.1013797 |
We summarize each participant’s average accuracy during the critical window and average baseline-corrected proportion target looking, split by typicality condition and target label.
# summarize average accuracy within participant, by condition and category ----
avg_corrected_target_looking_by_category <- trial_corrected_accuracy %>%
  group_by(sub_num, condition, target_category) %>%
  summarize(
    N = n(),
    mean_age = mean(age),
    mean_age_mo = mean(age_mo),
    average_corrected_target_looking = mean(corrected_target_looking, na.rm = TRUE),
    ci = qt(0.975, N - 1) * sd(corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = average_corrected_target_looking - ci,
    upper_ci = average_corrected_target_looking + ci,
    average_critical_window_looking = mean(mean_target_looking_critical, na.rm = TRUE),
    critical_window_ci = qt(0.975, N - 1) * sd(mean_target_looking_critical, na.rm = TRUE) / sqrt(N),
    critical_window_lower_ci = average_critical_window_looking - critical_window_ci,
    critical_window_upper_ci = average_critical_window_looking + critical_window_ci
  )
To investigate whether there was an effect of typicality on infants’ word recognition, we fit a linear mixed-effects model predicting average baseline-corrected proportion target looking from typicality condition (centered), including a by-participant random intercept. No random slope was included because the number of observations in the model would otherwise be equal to the number of random effects - however, a model with (overriding the lmer error message) or without typicality condition as a random slope yields identical results.
# Three numeric codings of typicality condition for the models below:
#   _c:    centered (atypical = -0.5, typical = +0.5) -> intercept = grand mean
#   _typ:  typical as reference (typical = 0)         -> intercept = typical mean
#   _atyp: atypical as reference (atypical = 0)       -> intercept = atypical mean
avg_corrected_target_looking_by_typicality <- avg_corrected_target_looking_by_typicality %>%
  mutate(
    typicality_condition_c = case_when(
      condition == "typical" ~ 0.5,
      condition == "atypical" ~ -0.5,
      TRUE ~ NA_real_
    ),
    typicality_condition_typ = case_when(
      condition == "typical" ~ 0,
      condition == "atypical" ~ -1,
      TRUE ~ NA_real_
    ),
    typicality_condition_atyp = case_when(
      condition == "typical" ~ 1,
      condition == "atypical" ~ 0,
      TRUE ~ NA_real_
    )
  )
# Mixed model: baseline-corrected looking ~ typicality (centered), with
# by-participant random intercept and random slope. With one observation per
# participant x condition, nobs equals the number of random effects, so the
# corresponding lmer checks are disabled via lmerControl (rationale below).
m_1_1 <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_c + (1+typicality_condition_c|sub_num),data=avg_corrected_target_looking_by_typicality,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
#Note that we ignore the warning here that number of observations is equal to the number of random effects, which lmer dislikes due to making it impossible to separate error at different hierarchical levels
#however, the model fit (at the fixed level) is for all intents and purposes identical for the model retaining the random slope for typicality and one removing the random slope for typicality
#(see commented-out model below)
#we therefore proceed with the (numerically identical) model with the random slope retained, for consistency with the Stage 1 plan.
#This same rationale applies to subsequent participant-level models we fit below.
#m_1_1 <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_c + (1|sub_num),data=avg_corrected_target_looking_by_typicality)
summary(m_1_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_c +
## (1 + typicality_condition_c | sub_num)
## Data: avg_corrected_target_looking_by_typicality
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -310.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.13730 -0.38265 0.05135 0.39401 1.82405
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.003995 0.06320
## typicality_condition_c 0.004733 0.06880 0.25
## Residual 0.003876 0.06225
## Number of obs: 168, groups: sub_num, 84
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.072147 0.008404 83.000327 8.585 4.42e-13 ***
## typicality_condition_c 0.014725 0.012191 82.999799 1.208 0.231
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.128
## optimizer (nloptwrap) convergence code: 0 (OK)
## Model is nearly unidentifiable: large eigenvalue ratio
## - Rescale variables?
# Wald CIs for the fixed effects (the Wald method returns NA for the
# variance/correlation parameters, as seen in the output)
confint(m_1_1,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.055675942 0.08861852
## typicality_condition_c -0.009169538 0.03861872
Infants successfully recognized the target words (Model Intercept: b=0.07, 95% CI [0.06, 0.09], t(83) = 8.59, p< .001).
Next, we plot the overall average baseline-corrected proportion target looking for each condition (in black). Individual points represent individual subjects, lines link subject responses between conditions. Error bars represent 95% CIs.
#Overall baseline-corrected proportion target looking by condition
# Raincloud-style plot: half-violins per condition, jittered per-participant
# points connected across conditions, and condition means with 95% CIs on top.
pal <- wes_palette("Rushmore1", n=5)
set.seed(1)
# shared jitter object so points and connecting paths get identical offsets
jitterer <- position_jitter(width = .05,seed=1)
p0 <- ggplot(avg_corrected_target_looking_by_typicality,aes(x=condition,y=average_corrected_target_looking, fill=condition))+
# distribution of participant means, one half-violin per condition
geom_half_violin(data=filter(avg_corrected_target_looking_by_typicality, condition=="atypical"),position = position_nudge(x = -.1, y = 0), width=1,trim = FALSE, alpha = .8,color=NA,side="l")+
geom_half_violin(data=filter(avg_corrected_target_looking_by_typicality, condition=="typical"),position = position_nudge(x = .1, y = 0), width=1,trim = FALSE, alpha = .8,color=NA,side="r")+
# per-participant points linked across conditions
geom_path(aes(group=sub_num),color="black",fill=NA,alpha=0.15,size=0.75,position=jitterer)+
geom_point(aes(color=condition,group=sub_num), size = 2.5, alpha=0.15,position=jitterer)+
# condition means, connecting line, and 95% CIs (in black)
geom_point(data=overall_corrected_target_looking_by_typicality,aes(y=corrected_target_looking),color="black",size=5)+
geom_line(data=overall_corrected_target_looking_by_typicality,aes(y=corrected_target_looking,group=1),color="black",size=3)+
geom_errorbar(data=overall_corrected_target_looking_by_typicality,aes(y=corrected_target_looking,ymin=lower_ci,ymax=upper_ci),width=0,size=1.2,color="black")+
#geom_boxplot(outlier.shape = NA, alpha = .5, width = .1, colour = "black")+
#scale_colour_brewer(palette = "Dark2")+
#scale_fill_brewer(palette = "Dark2")+
# dashed line at zero = no change from baseline
geom_hline(yintercept=0,linetype="dashed")+
scale_colour_manual(values=pal[c(3,4)])+
scale_fill_manual(values=pal[c(3,4)])+
theme(legend.position="none")+
xlab("Typicality Condition")+
ylab("Baseline-Corrected\nProportion Target Looking")+
theme(axis.title.x = element_text(face="bold", size=20),
axis.text.x = element_text(size=14),
axis.title.y = element_text(face="bold", size=20),
axis.text.y = element_text(size=16),
strip.text.x = element_text(size = 16,face="bold"))
p0
ggsave(here::here("..","figures","baseline_corrected_accuracy_overall.png"),width=7,height=6)
# Per-participant typical-minus-atypical difference, summarized for the
# equivalence test below.
overall_condition_summary <- avg_corrected_target_looking_by_typicality %>%
  ungroup() %>%
  group_by(sub_num) %>%
  summarize(
    condition_diff = average_corrected_target_looking[condition == "typical"] -
      average_corrected_target_looking[condition == "atypical"]
  ) %>%
  ungroup() %>%
  summarize(
    N = n(),
    diff = mean(condition_diff),
    sd = sd(condition_diff)
  )
# TOST equivalence test on the condition difference, with +/- 0.25 SMD bounds
tsum_TOST(
  m1 = overall_condition_summary$diff,
  sd1 = overall_condition_summary$sd,
  n1 = overall_condition_summary$N,
  eqb = 0.25,
  eqbound_type = "SMD"
)
##
## One-sample t-Test
##
## The equivalence test was non-significant, t(83) = -1.083, p = 1.41e-01
## The null hypothesis test was non-significant, t(83) = 1.208, p = 2.31e-01
## NHST: don't reject null significance hypothesis that the effect is equal to zero
## TOST: don't reject null equivalence hypothesis
##
## TOST Results
## t df p.value
## t-test 1.208 83 0.231
## TOST Lower 3.499 83 < 0.001
## TOST Upper -1.083 83 0.141
##
## Effect Sizes
## Estimate SE C.I. Conf. Level
## Raw 0.01472 0.01219 [-0.0056, 0.035] 0.9
## Hedges's g 0.13059 0.10957 [-0.0484, 0.3088] 0.9
## Note: SMD confidence intervals are an approximation. See vignette("SMD_calcs").
There was no significant effect of typicality, b=0.01, 95% CI [-0.009, 0.04], t(83)=1.20, p=.23, Cohen’s d=0.13. However, the equivalence test was also not statistically significant, t(83) = -1.08, p=.14. We therefore could not reject the null hypothesis that the absolute effect size was at least as large as d=0.25.
#quick sanity check
#t-test in tost is equivalent to regular old paired t-test
# AND equivalent to lmer estimate of typicality effect
# Pairing relies on both condition subsets being in the same sub_num order,
# which holds for the grouped summarize output above (sorted by sub_num).
t.test(
avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="typical"],
avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="atypical"],
paired=T)
##
## Paired t-test
##
## data: avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition == "typical"] and avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition == "atypical"]
## t = 1.2078, df = 83, p-value = 0.2305
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## -0.009522991 0.038972172
## sample estimates:
## mean difference
## 0.01472459
#effect size
# Paired Cohen's d for the typical-vs-atypical difference (comparable to the
# Hedges's g reported by the TOST output, up to the small-sample correction)
cohens_d(avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="typical"],
avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="atypical"],
paired=T)
## Cohen's d | 95% CI
## -------------------------
## 0.13 | [-0.08, 0.35]
## Typical word recognition
# recentering the model on the typical condition to make the intercept interpretable
# (typical = 0 in typicality_condition_typ, so the intercept estimates the
# typical-condition mean; the typicality slope estimate is unchanged).
# Same nobs-vs-random-effects caveat as for m_1_1 above.
m_1_1_3_typ <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_typ + (1+ typicality_condition_typ|sub_num),data=avg_corrected_target_looking_by_typicality,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_typ)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_typ +
## (1 + typicality_condition_typ | sub_num)
## Data: avg_corrected_target_looking_by_typicality
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -310.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.4304 -0.4351 0.0584 0.4481 2.0743
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.005144 0.07172
## typicality_condition_typ 0.002461 0.04960 0.66
## Residual 0.005012 0.07079
## Number of obs: 168, groups: sub_num, 84
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.07951 0.01100 83.00124 7.231 2.16e-10 ***
## typicality_condition_typ 0.01472 0.01219 83.00076 1.208 0.231
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.652
# Wald CIs for the fixed effects (NA rows are the variance parameters)
confint(m_1_1_3_typ,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.057958346 0.10106071
## typicality_condition_typ -0.009169479 0.03861866
#effect size
# One-sample Cohen's d of typical-condition baseline-corrected looking vs. zero
cohens_d(avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="typical"])
## Cohen's d | 95% CI
## ------------------------
## 0.79 | [0.54, 1.03]
## Atypical word recognition
# recentering the model on the atypical condition to make the intercept interpretable
# (atypical = 0 in typicality_condition_atyp, so the intercept estimates the
# atypical-condition mean; the typicality slope estimate is unchanged).
# Same nobs-vs-random-effects caveat as for m_1_1 above.
m_1_1_3_atyp <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_atyp + (1+ typicality_condition_atyp|sub_num),data=avg_corrected_target_looking_by_typicality,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_atyp)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_atyp +
## (1 + typicality_condition_atyp | sub_num)
## Data: avg_corrected_target_looking_by_typicality
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -310.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.3225 -0.4158 0.0558 0.4281 1.9821
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.003375 0.05809
## typicality_condition_atyp 0.003332 0.05772 -0.17
## Residual 0.004576 0.06765
## Number of obs: 168, groups: sub_num, 84
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.064785 0.009729 82.999758 6.659 2.8e-09 ***
## typicality_condition_atyp 0.014725 0.012191 83.000079 1.208 0.231
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ -0.516
## optimizer (nloptwrap) convergence code: 0 (OK)
## unable to evaluate scaled gradient
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
# Wald CIs for the fixed effects (NA rows are the variance parameters)
confint(m_1_1_3_atyp,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.045716306 0.08385357
## typicality_condition_atyp -0.009169478 0.03861866
#effect size
# One-sample Cohen's d of atypical-condition baseline-corrected looking vs. zero
cohens_d(avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="atypical"])
## Cohen's d | 95% CI
## ------------------------
## 0.73 | [0.48, 0.97]
Infants robustly recognized the target words for both typical (Model: b=0.08, 95% CI [0.06, 0.10], t(83)=7.23, p<.001; Cohen’s d = 0.79 [0.54, 1.03]; Mean baseline-corrected looking: M=8.0%, 95% CI [5.8%, 10.1%]) and atypical exemplars (Model: b=0.06, 95% CI [0.05, 0.08], t(83)=6.66, p<.001, Cohen’s d = 0.73 [0.48, 0.97]; Mean baseline-corrected looking: M=6.5%, 95% CI [4.5%, 8.4%]).
# Add the three typicality codings at the trial level (same scheme as the
# participant-level analysis: centered, typical-referenced, atypical-referenced).
trial_corrected_accuracy <- trial_corrected_accuracy %>%
  mutate(
    typicality_condition_c = case_when(
      condition == "typical" ~ 0.5,
      condition == "atypical" ~ -0.5,
      TRUE ~ NA_real_
    ),
    typicality_condition_typ = case_when(
      condition == "typical" ~ 0,
      condition == "atypical" ~ -1,
      TRUE ~ NA_real_
    ),
    typicality_condition_atyp = case_when(
      condition == "typical" ~ 1,
      condition == "atypical" ~ 0,
      TRUE ~ NA_real_
    )
  )
# The model with a by-participant random slope for typicality condition yields
# a singular fit, so the random slope was removed. The singular model still
# yields essentially identical results to the model without that slope:
# m_1_2 <- lmer(corrected_target_looking ~ 1 + typicality_condition_c +
#                 (1 + typicality_condition_c | sub_num) +
#                 (1 | target_category),
#               data = trial_corrected_accuracy)
m_1_2 <- lmer(
  corrected_target_looking ~ 1 + typicality_condition_c +
    (1 | sub_num) +
    (1 | target_category),
  data = trial_corrected_accuracy
)
summary(m_1_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_c + (1 |
## sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2247.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1618 -0.6169 -0.0213 0.6652 2.7744
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0028069 0.05298
## target_category (Intercept) 0.0002506 0.01583
## Residual 0.1185987 0.34438
## Number of obs: 3088, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.297e-02 1.163e-02 5.248e+00 6.276 0.00126 **
## typicality_condition_c 1.080e-02 1.241e-02 3.017e+03 0.870 0.38412
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.000
# Wald CIs for the fixed effects (NA rows are the variance parameters)
confint(m_1_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.05017938 0.09575134
## typicality_condition_c -0.01351946 0.03512238
# Centering on the typical condition (typical = 0): the intercept now
# estimates the typical-condition mean; the slope estimate is unchanged.
m_1_2_typ <- lmer(
  corrected_target_looking ~ 1 + typicality_condition_typ +
    (1 | sub_num) +
    (1 | target_category),
  data = trial_corrected_accuracy
)
summary(m_1_2_typ)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_typ + (1 |
## sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2247.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1618 -0.6169 -0.0213 0.6652 2.7744
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0028069 0.05298
## target_category (Intercept) 0.0002506 0.01583
## Residual 0.1185987 0.34438
## Number of obs: 3088, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.837e-02 1.318e-02 8.665e+00 5.947 0.000251 ***
## typicality_condition_typ 1.080e-02 1.241e-02 3.017e+03 0.870 0.384116
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.471
# Wald CIs for the fixed effects (NA rows are the variance parameters)
confint(m_1_2_typ,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.05253765 0.10419453
## typicality_condition_typ -0.01351946 0.03512238
# Centering on the atypical condition (atypical = 0): the intercept now
# estimates the atypical-condition mean; the slope estimate is unchanged.
m_1_2_atyp <- lmer(
  corrected_target_looking ~ 1 + typicality_condition_atyp +
    (1 | sub_num) +
    (1 | target_category),
  data = trial_corrected_accuracy
)
summary(m_1_2_atyp)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_atyp + (1 |
## sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2247.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1618 -0.6169 -0.0213 0.6652 2.7744
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0028069 0.05298
## target_category (Intercept) 0.0002506 0.01583
## Residual 0.1185987 0.34438
## Number of obs: 3088, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.756e-02 1.318e-02 8.662e+00 5.127 0.000701 ***
## typicality_condition_atyp 1.080e-02 1.241e-02 3.017e+03 0.870 0.384116
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ -0.471
# Wald CIs for the fixed effects (NA rows are the variance parameters)
confint(m_1_2_atyp,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.04173737 0.09339188
## typicality_condition_atyp -0.01351946 0.03512238
The model with the maximal random effects structure yielded a singular fit that was only remedied by removing the by-participant random slope for typicality condition. However, the (singular) model including the typicality random slope yielded virtually identical results to the converging model including random intercepts for participant and target word only. As in the average participant-level analysis, infants’ overall recognition of target words was significant in the trial-level model (b=0.07, 95% CI [0.05,0.10], t(5.2)=6.28, p=.001) and there was no significant effect of typicality (b=0.01, 95% CI [-0.01,0.04], t(3017)=0.87, p=.38). Word recognition was robust both for typical (b=0.08, 95% CI [0.05,0.10], t(8.7)=5.95, p<.001) and atypical exemplars (b=0.07, 95% CI [0.04,0.09], t(8.7)=5.13, p<.001).
The cluster-based permutation analysis is executed in cluster_permutation_analysis.Rmd.
In the cluster-based permutation analyses, we found one cluster of adjacent time bins ranging from 0-300ms with |t|>2 (in the direction of higher accuracy for typical exemplars compared to atypical exemplars). However, this cluster did not reach significance in the permutation test, p=.27.
Next, we plot the data. First we summarize the data in two steps: (1) summarize the data by subject for each time point, followed by (2) averaging looking for each time point across subjects.
# Per-participant time course: for each included participant, mean accuracy
# (and a 95% CI based on the non-missing sample count) at every resampled
# time point, restricted to useable trials.
summarize_subj <- d_resampled %>%
  filter(exclude_participant == 0, useable_window == 1) %>%
  group_by(sub_num, child_gender, time_normalized_corrected) %>%
  summarize(
    N = n(),
    mean_age = mean(age),
    mean_age_mo = mean(age_mo),
    non_na_n = sum(!is.na(accuracy_transformed)),
    mean_accuracy = mean(accuracy_transformed, na.rm = TRUE),
    ci = qt(0.975, non_na_n - 1) * sd(accuracy_transformed, na.rm = TRUE) / sqrt(non_na_n),
    lower_ci = mean_accuracy - ci,
    upper_ci = mean_accuracy + ci
  ) %>%
  ungroup()
# Grand-average time course: average the per-participant means at each
# time point, with a standard error across participants.
summarize_across_subj <- summarize_subj %>%
  group_by(time_normalized_corrected) %>%
  dplyr::summarize(
    n = n(),
    accuracy = mean(mean_accuracy, na.rm = TRUE),
    sd_accuracy = sd(mean_accuracy, na.rm = TRUE),
    se_accuracy = sd_accuracy / sqrt(n)
  )
# Grand-average proportion target looking over time, with GAM smooth and
# between-subject SEs; reference lines mark word onset, chance, and the
# start of the critical window.
ggplot(summarize_across_subj,aes(time_normalized_corrected,accuracy))+
xlim(-2000,4000)+
geom_smooth(method="gam")+
geom_errorbar(aes(ymin=accuracy-se_accuracy,ymax=accuracy+se_accuracy),width=0)+
geom_point()+
# target word onset
geom_vline(xintercept=0,size=1.5)+
# chance level
geom_hline(yintercept=0.5,size=1.2,linetype="dashed")+
# start of the critical window (300 ms)
geom_vline(xintercept=300,linetype="dotted")+
ylim(0.35,0.65)+
xlab("Time (normalized to target word onset) in ms")+
ylab("Proportion Target Looking")
ggsave(here::here("..","figures","overall_accuracy.png"))
# Same grand-average time course, split into four equal-sized age bins
# (quartiles of mean age in months via cut_number).
summarize_across_subj_by_age <- summarize_subj %>%
  mutate(age_group = cut_number(mean_age_mo, n = 4)) %>%
  group_by(age_group, time_normalized_corrected) %>%
  dplyr::summarize(
    n = n(),
    accuracy = mean(mean_accuracy, na.rm = TRUE),
    sd_accuracy = sd(mean_accuracy, na.rm = TRUE),
    se_accuracy = sd_accuracy / sqrt(n)
  )
# Time course of proportion target looking, faceted by age quartile.
ggplot(summarize_across_subj_by_age,aes(time_normalized_corrected,accuracy))+
xlim(-2000,4000)+
geom_smooth(method="gam")+
geom_errorbar(aes(ymin=accuracy-se_accuracy,ymax=accuracy+se_accuracy),width=0)+
geom_point()+
# target word onset
geom_vline(xintercept=0,size=1.5)+
# chance level
geom_hline(yintercept=0.5,size=1.2,linetype="dashed")+
# start of the critical window (300 ms)
geom_vline(xintercept=300,linetype="dotted")+
facet_wrap(~age_group)+
xlab("Time (normalized to target word onset) in ms")+
ylab("Proportion Target Looking")
ggsave(here::here("..","figures","overall_accuracy_by_age.png"),width=12, height=9)
# Per-participant time course split by typicality condition
# (included participants, useable trials only).
summarize_subj_condition <- d_resampled %>%
  filter(exclude_participant == 0, useable_window == 1) %>%
  group_by(sub_num, child_gender, condition, time_normalized_corrected) %>%
  summarize(
    mean_age = mean(age),
    mean_age_mo = mean(age_mo),
    mean_accuracy = mean(accuracy_transformed, na.rm = TRUE)
  )
# Condition-level time course: average across participants per time point.
summarize_across_subj_cond <- summarize_subj_condition %>%
  group_by(condition, time_normalized_corrected) %>%
  summarize(
    n = n(),
    accuracy = mean(mean_accuracy, na.rm = TRUE),
    sd_accuracy = sd(mean_accuracy, na.rm = TRUE),
    se_accuracy = sd_accuracy / sqrt(n)
  )
#plot
#timecourse plot
# Condition time course with shaded baseline (-2000-0 ms) and critical
# (300-2800 ms) windows; points/error bars come from the condition-level
# summary while the GAM smooth is fit to the participant-level data.
pal <- wes_palette("Rushmore1", n=5)
timecourse_plot <- ggplot(summarize_across_subj_cond,aes(time_normalized_corrected,accuracy,color=condition))+
# shaded rectangle marking the critical window
geom_rect(data = data.frame(xmin = 300,
xmax = 2800,
ymin = -Inf,
ymax = Inf),
aes(x=NULL, y=NULL,xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax,color=NULL),
fill = "grey", alpha = 0.2)+
# shaded rectangle marking the baseline window
geom_rect(data = data.frame(xmin = -2000,
xmax = 0,
ymin = -Inf,
ymax = Inf),
aes(x=NULL, y=NULL,xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax,color=NULL),
fill = "grey", alpha = 0.2)+
geom_errorbar(aes(ymin=accuracy-se_accuracy,ymax=accuracy+se_accuracy),width=0)+
geom_point(alpha=0.5)+
# smooth over per-participant means rather than the condition averages
geom_smooth(data=summarize_subj_condition,aes(y=mean_accuracy),method="gam")+
geom_vline(xintercept=0,size=1.5)+
geom_hline(yintercept=0.5,size=1.2,linetype="dashed")+
# dotted lines mark window boundaries
geom_vline(xintercept=300,linetype="dotted")+
geom_vline(xintercept=2800,linetype="dotted")+
geom_vline(xintercept=-2000,linetype="dotted")+
geom_vline(xintercept=0,linetype="dotted")+
theme(legend.position = c(0.8,0.15))+
annotate("text",label="Critical Window",x=1550,y=0.9,size=6)+
annotate("text",label="Baseline Window",x=-1000,y=0.9,size=6)+
ylim(0,1)+
#xlim(-2000,4000)+
scale_x_continuous(breaks=seq(-2000,4000,1000),limits=c(-2000,4000))+
scale_colour_manual(values=pal[c(3,4)])+
ylab("Proportion Target Looking")+
xlab("Time (centered on target word onset, in ms)")+
theme(
strip.background = element_rect(size=1, colour = "black"),
strip.text = element_text(size=16,face="bold"),
axis.title=element_text(size=20,face="bold"),
axis.text = element_text(size=14),
legend.text=element_text(size=18),
legend.title=element_text(size=18))
ggsave(here::here("..","figures","typicality_accuracy.png"),width=10,height=6)
#join in average age-centered variables
# (left_join with no `by` joins on all shared columns)
trial_corrected_accuracy <- trial_corrected_accuracy %>%
left_join(subj_info_multisession)
#fit main model
# the random slope of typicality causes a singular boundary fit
# all attempts to prune other random effects (including covariances) were not successful,
# so we removed the random slope for typicality.
# Note that the effects for the (singular fit) model including the random slope for typicality are
# equivalent to those from the reported model without the random slope
# (random intercepts for participant and target category only).
m_2 <- lmer(corrected_target_looking ~ 1 + typicality_condition_c * age_mo_c +
(1|sub_num) +
(1|target_category),
data=trial_corrected_accuracy)
summary(m_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_c * age_mo_c +
## (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2252.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.13164 -0.63461 -0.02025 0.66770 2.80638
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0021621 0.04650
## target_category (Intercept) 0.0002606 0.01614
## Residual 0.1186060 0.34439
## Number of obs: 3088, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.258e-02 1.140e-02 4.644e+00 6.367 0.00185
## typicality_condition_c 1.062e-02 1.242e-02 3.018e+03 0.855 0.39251
## age_mo_c 1.806e-02 5.415e-03 8.871e+01 3.336 0.00124
## typicality_condition_c:age_mo_c 5.710e-03 8.413e-03 3.017e+03 0.679 0.49741
##
## (Intercept) **
## typicality_condition_c
## age_mo_c **
## typicality_condition_c:age_mo_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__ ag_m_c
## typclty_cn_ 0.000
## age_mo_c -0.016 0.005
## typclt__:__ 0.004 -0.034 0.000
# Wald 95% CIs for the fixed effects (variance-component rows are NA under method="Wald")
confint(m_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.050236937 0.09491934
## typicality_condition_c -0.013715660 0.03495001
## age_mo_c 0.007450156 0.02867635
## typicality_condition_c:age_mo_c -0.010779703 0.02219878
#save interim model object
# summarize_mixed_effects_model() is a project helper (sourced at the top of
# the file -- presumably from compute_iccs.R or common.R; confirm there)
m_2_tidy <- m_2 %>%
summarize_mixed_effects_model()
In the trial-level linear mixed-effects model including age, typicality condition, and their interaction, we found a significant effect of age (\(\hat{\beta} = 0.02\), 95% CI \([0.01, 0.03]\), \(t(88.71) = 3.34\), \(p = .001\)), suggesting that word recognition accuracy increased with age overall. There was no significant interaction between age and typicality (\(\hat{\beta} = 0.01\), 95% CI \([-0.01, 0.02]\), \(t(3,016.88) = 0.68\), \(p = .497\)), meaning that we found no evidence that the effect of typicality changed with age.
# Age effect on baseline-corrected accuracy: participant means with 95% CIs
# and a linear fit. NOTE: geom_pointrange() has no `width` parameter, so the
# original `width = 0` argument (silently ignored by ggplot2) was removed.
ggplot(avg_corrected_target_looking, aes(mean_age, average_corrected_target_looking)) +
  geom_pointrange(aes(ymin = lower_ci, ymax = upper_ci),
                  position = position_jitter(width = 0.1),
                  size = 1.5) +
  geom_hline(yintercept = 0, linetype = "dashed") +
  geom_smooth(method = "lm") +
  xlab("Age (in months)") +
  ylab("Baseline-Corrected Proportion Target Looking") +
  ylim(-0.55, 0.55) +
  scale_x_continuous(breaks = seq(12, 18, 1))
ggsave(here::here("..","figures","age_relationship_baseline_corrected_accuracy.png"), width = 7, height = 6)
pal <- wes_palette("Rushmore1", n = 5)
# Age x typicality: per-participant means (+/- SE) by condition, faceted by
# condition with a black linear fit. NOTE: geom_pointrange() has no `width`
# parameter, so the ignored `width = 0` argument was removed.
p1 <- ggplot(avg_corrected_target_looking_by_typicality, aes(mean_age_mo, average_corrected_target_looking, color = condition, group = condition)) +
  geom_pointrange(aes(ymin = lower_se, ymax = upper_se),
                  position = position_jitter(width = 0.2),
                  size = 1) +
  geom_hline(yintercept = 0, linetype = "dashed") +
  geom_smooth(method = "lm", color = "black", size = 1.3) +
  xlab("Age (in months)") +
  ylab("Baseline-Corrected\nProportion Target Looking") +
  #ylim(-0.55,0.5)+
  scale_colour_manual(values = pal[c(3, 4)]) +
  scale_x_continuous(breaks = seq(12, 18, 1)) +
  facet_wrap(~condition) +
  theme(
    strip.background = element_rect(size = 1, colour = "black"),
    strip.text = element_text(size = 16, face = "bold"),
    axis.title = element_text(size = 20, face = "bold"),
    axis.text = element_text(size = 14)) +
  theme(legend.position = "none")
ggsave(here::here("..","figures","age_relationship_baseline_corrected_accuracy_typicality.png"), width = 9, height = 6)
#combine key plots into one main figure
library(patchwork)
# NOTE(review): adding `+ theme(...)` to a patchwork modifies only the
# last-added plot; to style the tags of all panels, `& theme(...)` (or
# plot_annotation(theme = ...)) may be intended -- confirm against the
# rendered figure.
(p0 + p1) / timecourse_plot +
plot_annotation(tag_levels = 'A')+
theme(plot.tag = element_text(size = 18))
ggsave(here::here("..","figures","main_figure.png"),width=12,height=12)
Next, we test whether individual differences in word recognition or typicality effects are predicted by differences in infants’ experiences with each exemplar.
#subject details for aim 3 analysis (how many participants have survey data)
# count participants with a non-missing parent typicality rating
aim3_subject_info <- trial_corrected_accuracy %>%
filter(!is.na(target_parent_typicality_rating_z)) %>%
ungroup()%>%
summarize(
N = length(unique(sub_num)),
# NOTE(review): age_c appears to be a centered age variable (mean near 0,
# SD ~45 in the table below), so mean_age/sd_age are on the centered
# scale -- confirm whether raw age was intended here.
mean_age = mean(age_c),
sd_age = sd(age_c)
)
aim3_subject_info%>%
knitr::kable()
| N | mean_age | sd_age |
|---|---|---|
| 74 | -0.7330562 | 45.0664 |
#fit main model
# as before, the random slope of typicality (this time parent-rated typicality) causes a singular boundary fit
# all attempts to prune other random effects (including covariances) were not successful,
# so we removed the random slope for target_parent_typicality_rating_z.
# Note that the effects for the (singular fit) model including the random slope for typicality are
# equivalent to those from the reported model without the random slope
# (random intercepts for participant and target category only).
# In other words, the decision to omit the random slope does not substantively impact the main estimates or change the pattern of results
# m_3 <- lmer(corrected_target_looking ~ 1 + target_parent_typicality_rating_z + age_mo_c + (1+ target_parent_typicality_rating_z|sub_num) + (1|target_category), trial_corrected_accuracy)
m_3 <- lmer(corrected_target_looking ~ 1 + target_parent_typicality_rating_z + age_mo_c + (1|sub_num) + (1|target_category), trial_corrected_accuracy)
summary(m_3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + target_parent_typicality_rating_z +
## age_mo_c + (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 1995.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.11217 -0.63232 -0.02323 0.66553 2.77341
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0024417 0.04941
## target_category (Intercept) 0.0003568 0.01889
## Residual 0.1196135 0.34585
## Number of obs: 2702, groups: sub_num, 74; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 7.239e-02 1.295e-02 4.658e+00 5.589
## target_parent_typicality_rating_z 4.967e-03 6.871e-03 2.323e+03 0.723
## age_mo_c 1.475e-02 5.956e-03 6.716e+01 2.477
## Pr(>|t|)
## (Intercept) 0.00316 **
## target_parent_typicality_rating_z 0.46984
## age_mo_c 0.01577 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) tr____
## trgt_prn___ 0.008
## age_mo_c 0.025 0.012
# tidy model summary via project helper (sourced at the top of the file)
m_3_tidy <- m_3 %>%
summarize_mixed_effects_model()
Caregiver report of exemplar typicality did not significantly predict infants’ baseline-corrected word recognition accuracy (\(\hat{\beta} = 0.00\), 95% CI \([-0.01, 0.02]\), \(t(2,322.50) = 0.72\), \(p = .470\)). Controlling for parent-reported typicality, age remained a significant predictor of infants’ word recognition (\(\hat{\beta} = 0.01\), 95% CI \([0.00, 0.03]\), \(t(67.16) = 2.48\), \(p = .016\)).
#quick visualization of the (non-)effect for each category
#by category
# trial-level accuracy against z-scored parent typicality ratings,
# with a separate linear fit per target category
ggplot(trial_corrected_accuracy,aes(target_parent_typicality_rating_z,corrected_target_looking))+
geom_point(alpha=0.1)+
geom_smooth(method = "lm")+
facet_wrap(~target_category)
We also conducted a series of robustness analyses to probe the degree to which any results hinged on key analytic decisions.
We conducted the same main analyses as those described above using an alternative, shorter critical window of 300-1800ms (e.g., Fernald et al., 2008) (with the exception of the cluster-based permutation analysis described in analysis section 1.3).
First, we summarize participants’ looking behavior as before, using the new critical window.
# apply exclusions based on the new critical window
trial_corrected_accuracy_short_window <- trial_corrected_accuracy_all %>%
filter(exclude_participant_insufficient_data_short==0) %>%
#only include trials that are useable based on looking in the 300-1800ms window
filter(useable_window_short==1) %>%
#ignore trial exclusions due to "insufficient looking" based on 300-2800ms window
# (i.e., retain trials whose only recorded exclusion reason was insufficient
# looking in the longer window, plus trials with no exclusion reason)
filter(is.na(trial_exclusion_reason) | trial_exclusion_reason=="insufficient looking")
# Average short-window (300-1800 ms) baseline-corrected accuracy within each
# participant, split by typicality, with SE- and 95%-CI-based bounds.
avg_corrected_target_looking_by_typicality_short_window <- trial_corrected_accuracy_short_window %>%
  group_by(sub_num, condition) %>%
  summarize(
    N = n(),
    mean_age = mean(age),
    mean_age_mo = mean(age_mo),
    average_corrected_target_looking = mean(corrected_target_looking_short, na.rm = TRUE),
    se = sd(corrected_target_looking_short, na.rm = TRUE) / sqrt(N),
    ci = qt(0.975, N - 1) * sd(corrected_target_looking_short, na.rm = TRUE) / sqrt(N),
    lower_ci = average_corrected_target_looking - ci,
    upper_ci = average_corrected_target_looking + ci,
    lower_se = average_corrected_target_looking - se,
    upper_se = average_corrected_target_looking + se
  )
#baseline-corrected target looking summarized overall
# condition-level means with 95% CIs across participant averages
overall_corrected_target_looking_by_typicality_short_window <- avg_corrected_target_looking_by_typicality_short_window %>%
group_by(condition) %>%
summarize(N=n(),
corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=corrected_target_looking-ci,
upper_ci=corrected_target_looking+ci)
overall_corrected_target_looking_by_typicality_short_window %>%
knitr::kable()
| condition | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|
| atypical | 82 | 0.0574887 | 0.0213794 | 0.0361094 | 0.0788681 |
| typical | 82 | 0.0780616 | 0.0220918 | 0.0559697 | 0.1001534 |
#1.1. Participant-level analysis of the typicality effect
# Condition codings: _c is centered (-0.5/+0.5); _typ puts typical at 0 so
# the intercept estimates the typical condition; _atyp puts atypical at 0.
avg_corrected_target_looking_by_typicality_short_window <- avg_corrected_target_looking_by_typicality_short_window %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
),
typicality_condition_typ = case_when(
condition == "atypical" ~ -1,
condition == "typical" ~ 0,
TRUE ~ NA_real_
),
typicality_condition_atyp = case_when(
condition == "atypical" ~ 0,
condition == "typical" ~ 1,
TRUE ~ NA_real_
),
)
# lmerControl relaxes the obs-vs-levels checks because each participant
# contributes only two rows (one per condition)
m_4_1_1_1 <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_c + (1+ typicality_condition_c|sub_num),data=avg_corrected_target_looking_by_typicality_short_window,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_4_1_1_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_c +
## (1 + typicality_condition_c | sub_num)
## Data: avg_corrected_target_looking_by_typicality_short_window
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -284.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.91157 -0.34766 0.09098 0.42244 1.26530
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.004173 0.06460
## typicality_condition_c 0.009034 0.09505 0.05
## Residual 0.003357 0.05794
## Number of obs: 164, groups: sub_num, 82
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.067775 0.008447 80.998673 8.023 6.76e-12 ***
## typicality_condition_c 0.020573 0.013858 81.000990 1.485 0.142
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.033
## optimizer (nloptwrap) convergence code: 0 (OK)
## unable to evaluate scaled gradient
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
# Wald 95% CIs (variance-component rows are NA under method="Wald")
confint(m_4_1_1_1,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.051218712 0.08433158
## typicality_condition_c -0.006588576 0.04773425
#effect size
# paired Cohen's d (typical vs. atypical within participant); use TRUE rather
# than the re-assignable shorthand T
# NOTE(review): pairing assumes rows are aligned by sub_num across the two
# condition subsets -- confirm the grouped data are sorted consistently
cohens_d(
  filter(avg_corrected_target_looking_by_typicality_short_window, condition == "typical")$average_corrected_target_looking,
  filter(avg_corrected_target_looking_by_typicality_short_window, condition == "atypical")$average_corrected_target_looking,
  paired = TRUE)
## Cohen's d | 95% CI
## -------------------------
## 0.16 | [-0.05, 0.38]
There was no significant effect of typicality in the average participant-level analysis, b=0.02, 95% CI [-0.007, 0.05], t(81)=1.49, p=.14, Cohen’s d=0.16 [-0.05, 0.38].
# run equivalence test
# compute each participant's within-subject (typical - atypical) difference,
# then its mean and SD across participants for the one-sample TOST
overall_condition_summary_short_window <- avg_corrected_target_looking_by_typicality_short_window %>%
group_by(sub_num) %>%
summarize(
condition_diff_alternative = average_corrected_target_looking[condition=="typical"]-average_corrected_target_looking[condition=="atypical"]
) %>%
ungroup() %>%
summarize(
N=n(),
diff = mean(condition_diff_alternative),
sd = sd(condition_diff_alternative)
)
# one-sample TOST with equivalence bounds of +/-0.25 standardized mean difference
tsum_TOST(m1=overall_condition_summary_short_window$diff,sd1=overall_condition_summary_short_window$sd,n1=overall_condition_summary_short_window$N,eqb=0.25, eqbound_type = "SMD")
##
## One-sample t-Test
##
## The equivalence test was non-significant, t(81) = -0.779, p = 2.19e-01
## The null hypothesis test was non-significant, t(81) = 1.485, p = 1.42e-01
## NHST: don't reject null significance hypothesis that the effect is equal to zero
## TOST: don't reject null equivalence hypothesis
##
## TOST Results
## t df p.value
## t-test 1.4845 81 0.142
## TOST Lower 3.7484 81 < 0.001
## TOST Upper -0.7793 81 0.219
##
## Effect Sizes
## Estimate SE C.I. Conf. Level
## Raw 0.02057 0.01386 [-0.0025, 0.0436] 0.9
## Hedges's g 0.16241 0.11116 [-0.0193, 0.3431] 0.9
## Note: SMD confidence intervals are an approximation. See vignette("SMD_calcs").
As in the main analysis, the equivalence test was also not significant, t(81)=-0.78, p=.22.
#word recognition for typical items
# coding: typical = 0, atypical = -1, so the intercept estimates
# recognition for typical exemplars
m_1_1_3_typ_short_window <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_typ + (1+ typicality_condition_typ|sub_num),data=avg_corrected_target_looking_by_typicality_short_window,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_typ_short_window)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_typ +
## (1 + typicality_condition_typ | sub_num)
## Data: avg_corrected_target_looking_by_typicality_short_window
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -284.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.3923 -0.4351 0.1139 0.5287 1.5835
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.004851 0.06965
## typicality_condition_typ 0.005233 0.07234 0.58
## Residual 0.005258 0.07251
## Number of obs: 164, groups: sub_num, 82
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.07806 0.01110 80.99961 7.031 5.89e-10 ***
## typicality_condition_typ 0.02057 0.01386 81.00018 1.485 0.142
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.649
## optimizer (nloptwrap) convergence code: 0 (OK)
## unable to evaluate scaled gradient
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
# Wald 95% CIs (variance-component rows are NA under method="Wald")
confint(m_1_1_3_typ_short_window,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.056299711 0.09982341
## typicality_condition_typ -0.006588705 0.04773438
#effect size
# one-sample Cohen's d for typical trials (recognition vs. zero)
cohens_d(avg_corrected_target_looking_by_typicality_short_window$average_corrected_target_looking[avg_corrected_target_looking_by_typicality_short_window$condition=="typical"])
## Cohen's d | 95% CI
## ------------------------
## 0.78 | [0.53, 1.02]
#word recognition for atypical items
# coding: atypical = 0, typical = 1, so the intercept estimates
# recognition for atypical exemplars
m_1_1_3_atyp_short_window <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_atyp + (1+ typicality_condition_atyp|sub_num),data=avg_corrected_target_looking_by_typicality_short_window,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_atyp_short_window)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_atyp +
## (1 + typicality_condition_atyp | sub_num)
## Data: avg_corrected_target_looking_by_typicality_short_window
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -284.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.3682 -0.4307 0.1127 0.5234 1.5675
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.004315 0.06569
## typicality_condition_atyp 0.005443 0.07378 -0.50
## Residual 0.005152 0.07178
## Number of obs: 164, groups: sub_num, 82
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.05749 0.01075 80.99992 5.350 7.97e-07 ***
## typicality_condition_atyp 0.02057 0.01386 81.00009 1.485 0.142
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ -0.619
## optimizer (nloptwrap) convergence code: 0 (OK)
## Model is nearly unidentifiable: large eigenvalue ratio
## - Rescale variables?
# Wald 95% CIs (variance-component rows are NA under method="Wald")
confint(m_1_1_3_atyp_short_window,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.036428719 0.07854873
## typicality_condition_atyp -0.006588751 0.04773443
#effect size
# one-sample Cohen's d for atypical trials (recognition vs. zero)
cohens_d(avg_corrected_target_looking_by_typicality_short_window$average_corrected_target_looking[avg_corrected_target_looking_by_typicality_short_window$condition=="atypical"])
## Cohen's d | 95% CI
## ------------------------
## 0.59 | [0.35, 0.82]
Infants showed robust recognition of both typical (b=0.08, 95% CI [0.06, 0.10], t(81)=7.03, p<.001, Cohen’s d=0.78 [0.53, 1.02]) and atypical targets (b=0.06, 95% CI [0.04, 0.08], t(81)=5.35, p<.001, Cohen’s d=0.59 [0.35, 0.82]).
# 1.2.
# trial-level analysis of the typicality effect
# Condition codings mirror the participant-level analysis: _c centered,
# _typ with typical = 0, _atyp with atypical = 0.
trial_corrected_accuracy_short_window <- trial_corrected_accuracy_short_window %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
),
typicality_condition_typ = case_when(
condition == "atypical" ~ -1,
condition == "typical" ~ 0,
TRUE ~ NA_real_
),
typicality_condition_atyp = case_when(
condition == "atypical" ~ 0,
condition == "typical" ~ 1,
TRUE ~ NA_real_
),
)
# trial-level model with crossed random intercepts for participant and target category
m_4_1_1_2 <- lmer(corrected_target_looking_short ~ 1 + typicality_condition_c +
(1 |sub_num) +
(1|target_category),
data=trial_corrected_accuracy_short_window)
summary(m_4_1_1_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking_short ~ 1 + typicality_condition_c +
## (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy_short_window
##
## REML criterion at convergence: 3166.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.7188 -0.6977 0.0018 0.7387 2.4109
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0016953 0.04117
## target_category (Intercept) 0.0003626 0.01904
## Residual 0.1622029 0.40274
## Number of obs: 3066, groups: sub_num, 82; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.930e-02 1.284e-02 3.891e+00 5.399 0.00616 **
## typicality_condition_c 1.546e-02 1.456e-02 2.999e+03 1.062 0.28821
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.001
# Wald 95% CIs (variance-component rows are NA under method="Wald")
confint(m_4_1_1_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.04414589 0.09445940
## typicality_condition_c -0.01306713 0.04399158
We again found no significant effect of typicality (b=0.02, 95% CI [-0.01, 0.04], t(2999)=1.06, p=.29).
# Aim 2: Typicality by Age interaction
# (left_join with no `by` joins on all shared columns)
trial_corrected_accuracy_short_window <- trial_corrected_accuracy_short_window %>%
left_join(subj_info_multisession)
# trial-level model: centered typicality x centered age (months), with
# crossed random intercepts for participant and target category
m_4_1_2 <- lmer(corrected_target_looking_short ~ 1 + typicality_condition_c * age_mo_c +
(1|sub_num) +
(1|target_category),
data=trial_corrected_accuracy_short_window)
summary(m_4_1_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking_short ~ 1 + typicality_condition_c *
## age_mo_c + (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy_short_window
##
## REML criterion at convergence: 3173.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.73731 -0.69913 0.00321 0.74213 2.44769
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0011751 0.03428
## target_category (Intercept) 0.0003739 0.01934
## Residual 0.1622103 0.40275
## Number of obs: 3066, groups: sub_num, 82; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.885e-02 1.269e-02 3.590e+00 5.424 0.00760
## typicality_condition_c 1.526e-02 1.456e-02 2.999e+03 1.048 0.29471
## age_mo_c 1.679e-02 5.616e-03 8.566e+01 2.989 0.00366
## typicality_condition_c:age_mo_c 6.881e-03 9.968e-03 2.997e+03 0.690 0.49005
##
## (Intercept) **
## typicality_condition_c
## age_mo_c **
## typicality_condition_c:age_mo_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__ ag_m_c
## typclty_cn_ 0.001
## age_mo_c -0.017 0.005
## typclt__:__ 0.003 -0.034 -0.001
# Wald 95% CIs (variance-component rows are NA under method="Wald")
confint(m_4_1_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.04396814 0.09372988
## typicality_condition_c -0.01328057 0.04380545
## age_mo_c 0.00577754 0.02779367
## typicality_condition_c:age_mo_c -0.01265606 0.02641858
As in the main analysis, we also found no interaction between age and typicality (Aim 2). Age remained a significant predictor of accuracy.
#Aim 3
#parent typicality ratings as a predictor
# short-window robustness version of m_3: z-scored parent rating plus
# centered age, with crossed random intercepts
m_4_1_3 <- lmer(corrected_target_looking_short ~ 1 + target_parent_typicality_rating_z + age_mo_c + (1|sub_num) + (1|target_category), trial_corrected_accuracy_short_window)
summary(m_4_1_3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## corrected_target_looking_short ~ 1 + target_parent_typicality_rating_z +
## age_mo_c + (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy_short_window
##
## REML criterion at convergence: 2793.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.73384 -0.69325 0.00725 0.73690 2.48279
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0011102 0.03332
## target_category (Intercept) 0.0005681 0.02383
## Residual 0.1627387 0.40341
## Number of obs: 2693, groups: sub_num, 72; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 7.137e-02 1.478e-02 3.448e+00 4.829
## target_parent_typicality_rating_z 1.036e-02 8.036e-03 2.372e+03 1.289
## age_mo_c 1.555e-02 5.921e-03 6.875e+01 2.626
## Pr(>|t|)
## (Intercept) 0.0122 *
## target_parent_typicality_rating_z 0.1974
## age_mo_c 0.0106 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) tr____
## trgt_prn___ 0.007
## age_mo_c 0.009 0.011
# tidy model summary via project helper (sourced at the top of the file)
m_4_1_3_tidy <- m_4_1_3 %>%
summarize_mixed_effects_model()
Caregiver report of exemplar typicality did not significantly predict infants’ baseline-corrected word recognition accuracy (\(\hat{\beta} = 0.01\), 95% CI \([-0.01, 0.03]\), \(t(2,372.40) = 1.29\), \(p = .197\)). After controlling for the effect of parental report of typicality, age remained a significant predictor of infants’ word recognition (\(\hat{\beta} = 0.02\), 95% CI \([0.00, 0.03]\), \(t(68.75) = 2.63\), \(p = .011\)).
We were unable to conduct a robustness analysis excluding words marked as unknown by caregivers due to the omission of the MCDI during data collection.
To assess the consistency of typicality effects across multiple indices of word recognition, we fit the main two models in Aim 1 (section 1.1 and 1.2 in the Analysis Plan), testing for a typicality effect at the participant level and at the trial level, using a second dependent measure: reaction time.
Reaction times are computed using helper functions in the R scripts rt_helper.R and compute_rt.R.
Here, we load in the trial-by-trial reaction time data and join it with other trial properties.
#load RT data
rt_path <- here::here("..","..","data","processed_data","CATegories_exp2_RT_by_trial.csv")
d_rt <- read_csv(rt_path)
#get some other useful metadata and combine with the RT data
# one row per trial with design and exclusion metadata to merge onto RTs
d_trial_level <- d %>%
distinct(sub_num,session, trial_number,condition, age, age_mo, target_image, child_gender,target_category,exclude_technical_issue,exclude_frame_rate)
# (left_join with no `by` joins on all shared columns)
d_rt<- d_rt %>%
left_join(d_trial_level)
#participants must contribute 4 typical and 4 atypical trials to be included in analysis
d_rt_subj_summary <- d_rt %>%
#we only care about distractor-to-target shifts (not e.g. T-D, target-distractor shifts)
filter(shift_type == "D-T")%>%
#only include RTs within the critical window
filter(rt>=300) %>%
filter(rt<=2800) %>%
#only include participants from the final sample
filter(exclude_participant==0) %>%
#exclude any trials with technical issues or frame rate issues
filter(exclude_frame_rate==0) %>%
filter(exclude_technical_issue==0) %>%
group_by(sub_num, condition) %>%
summarize(
trials = n()
) %>%
# one column of trial counts per condition (useable_rt_trials_typical / _atypical)
pivot_wider(names_from=condition,names_prefix="useable_rt_trials_",values_from=trials) %>%
# only include participants with at least 4 reaction time trials of each type
mutate(
sufficient_rt_trials=case_when(
# NA count means the participant had no useable trials of that type
is.na(useable_rt_trials_typical) ~ 0,
is.na(useable_rt_trials_atypical) ~ 0,
useable_rt_trials_typical>=4 & useable_rt_trials_atypical>=4 ~ 1,
TRUE ~ 0)
)
#add exclusionary criteria to DF
d_rt <- d_rt %>%
  left_join(d_rt_subj_summary)
#apply exclusions and store the final RT dataset to use in analyses
d_rt_final <- d_rt %>%
  filter(shift_type == "D-T") %>%
  #only include RTs within the critical window
  filter(rt >= 300) %>%
  filter(rt <= 2800) %>%
  #only include participants from the final sample
  filter(exclude_participant == 0) %>%
  #exclude any trials with technical issues or frame rate issues
  filter(exclude_frame_rate == 0) %>%
  filter(exclude_technical_issue == 0) %>%
  filter(sufficient_rt_trials == 1)
# inspect the RT distribution (the argument-less filter() wrapper around
# d_rt_final in the original was a no-op and has been removed)
hist(d_rt_final$rt)
#log-transform rt
d_rt_final <- d_rt_final %>%
  mutate(
    log_rt = log(rt),
    log_shift_start_rt = log(shift_start_rt)
  )
The data are right skewed, which is common for RTs. We will use log transformations in the subsequent models to account for the distribution of the data.
#summarize reaction time by participant and condition
# per-participant mean RT on both the raw and log scale, with 95% CIs,
# plus the centered typicality coding for the participant-level model
avg_subj_rt <- d_rt_final %>%
group_by(sub_num, child_gender,condition) %>%
summarize(N=n(),
average_rt=mean(rt,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(rt,na.rm=T)/sqrt(N),
lower_ci=average_rt-ci,
upper_ci=average_rt+ci,
average_log_rt=mean(log_rt,na.rm=TRUE),
log_rt_ci=qt(0.975, N-1)*sd(log_rt,na.rm=T)/sqrt(N),
lower_log_rt_ci=average_log_rt-log_rt_ci,
upper_log_rt_ci=average_log_rt+log_rt_ci) %>%
mutate(
# centered coding: atypical = -0.5, typical = +0.5
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
)
)
# Overall reaction times: condition means of the participant averages,
# with 95% t-based confidence intervals.
# Fix: na.rm=T replaced with na.rm=TRUE.
overall_rt <- avg_subj_rt %>%
  group_by(condition) %>%
  summarize(
    N = n(),
    avg_rt = mean(average_rt, na.rm = TRUE),
    ci = qt(0.975, N - 1) * sd(average_rt, na.rm = TRUE) / sqrt(N),
    lower_ci = avg_rt - ci,
    upper_ci = avg_rt + ci
  )
overall_rt %>%
  knitr::kable()
| condition | N | avg_rt | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|
| atypical | 73 | 912.4030 | 63.01259 | 849.3904 | 975.4156 |
| typical | 73 | 893.2222 | 53.76717 | 839.4550 | 946.9893 |
# participant-level model testing the typicality effect:
# log RT ~ centered typicality condition, random intercept per participant
m_4_3_1 <- lmer(average_log_rt ~ 1 + typicality_condition_c + (1|sub_num),data=avg_subj_rt)
summary(m_4_3_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_log_rt ~ 1 + typicality_condition_c + (1 | sub_num)
## Data: avg_subj_rt
##
## REML criterion at convergence: 8.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.31039 -0.59944 0.02166 0.54475 2.54113
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.02292 0.1514
## Residual 0.03991 0.1998
## Number of obs: 146, groups: sub_num, 73
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.65188 0.02424 72.00000 274.466 <2e-16 ***
## typicality_condition_c -0.01262 0.03307 72.00000 -0.382 0.704
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.000
# Wald confidence intervals for the fixed effects of m_4_3_1
confint(m_4_3_1,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sigma NA NA
## (Intercept) 6.60437915 6.69938141
## typicality_condition_c -0.07743288 0.05218727
# effect-code typicality at the trial level: atypical = -0.5, typical = +0.5
d_rt_final <- d_rt_final %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
)
)
# trial-level model: log RT ~ typicality, with crossed random intercepts
# for participants and target categories
m_4_3_2 <- lmer(log_rt ~ 1 + typicality_condition_c +
(1|sub_num) +
(1|target_category),
data=d_rt_final)
summary(m_4_3_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## log_rt ~ 1 + typicality_condition_c + (1 | sub_num) + (1 | target_category)
## Data: d_rt_final
##
## REML criterion at convergence: 1704.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.06751 -0.69950 -0.04359 0.60787 2.46816
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.01850 0.1360
## target_category (Intercept) 0.00243 0.0493
## Residual 0.26711 0.5168
## Number of obs: 1081, groups: sub_num, 73; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.64235 0.03342 4.96794 198.750 6.97e-11 ***
## typicality_condition_c -0.01232 0.03183 1042.52347 -0.387 0.699
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ -0.004
# Wald confidence intervals for the fixed effects of m_4_3_2
confint(m_4_3_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 6.57684563 6.70785239
## typicality_condition_c -0.07469865 0.05005745
Given the multi-session structure of our data collection procedure, we also tested whether analyses held individually in each test session, by repeating the analyses above while including an interaction with test session (session 1 vs. session 2).
# Summarize participants' overall average accuracy (baseline-corrected
# proportion target looking) by session, with 95% t-based CIs.
# Fix: na.rm=T replaced with na.rm=TRUE.
avg_corrected_target_looking_by_session <- trial_corrected_accuracy %>%
  group_by(sub_num, session, age, age_mo, days_between_sessions) %>%
  summarize(
    N = n(),
    mean_age = mean(age),
    mean_age_mo = mean(age_mo),
    average_corrected_target_looking = mean(corrected_target_looking, na.rm = TRUE),
    ci = qt(0.975, N - 1) * sd(corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = average_corrected_target_looking - ci,
    upper_ci = average_corrected_target_looking + ci
  )
# Summarize average accuracy within participant, split by session and
# typicality condition (SEs and 95% CIs, plus raw critical-window looking).
# Fix: na.rm=T replaced with na.rm=TRUE.
avg_corrected_target_looking_by_typicality_session <- trial_corrected_accuracy %>%
  group_by(sub_num, session, age, age_mo, days_between_sessions, condition) %>%
  summarize(
    N = n(),
    average_corrected_target_looking = mean(corrected_target_looking, na.rm = TRUE),
    se = sd(corrected_target_looking, na.rm = TRUE) / sqrt(N),
    ci = qt(0.975, N - 1) * sd(corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = average_corrected_target_looking - ci,
    upper_ci = average_corrected_target_looking + ci,
    lower_se = average_corrected_target_looking - se,
    upper_se = average_corrected_target_looking + se,
    average_critical_window_looking = mean(mean_target_looking_critical, na.rm = TRUE),
    critical_window_ci = qt(0.975, N - 1) * sd(mean_target_looking_critical, na.rm = TRUE) / sqrt(N),
    critical_window_lower_ci = average_critical_window_looking - critical_window_ci,
    critical_window_upper_ci = average_critical_window_looking + critical_window_ci
  )
# Add contrast codings for condition and session:
# - typicality_condition_c: centered coding (atypical = -0.5, typical = +0.5)
# - typicality_condition_typ: intercept lands on the typical condition (coded 0)
# - typicality_condition_atyp: intercept lands on the atypical condition (coded 0)
# - session_c: centered session (session 1 = -0.5, session 2 = +0.5)
avg_corrected_target_looking_by_typicality_session <- avg_corrected_target_looking_by_typicality_session %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
),
typicality_condition_typ = case_when(
condition == "atypical" ~ -1,
condition == "typical" ~ 0,
TRUE ~ NA_real_
),
typicality_condition_atyp = case_when(
condition == "atypical" ~ 0,
condition == "typical" ~ 1,
TRUE ~ NA_real_
),
session_c = session - 1.5
)
# Participant-level model: typicality x session interaction, with a random
# intercept and random session slope per participant
m_4_4_1 <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_c*session_c + (1+session_c|sub_num),data=avg_corrected_target_looking_by_typicality_session)
summary(m_4_4_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_c *
## session_c + (1 + session_c | sub_num)
## Data: avg_corrected_target_looking_by_typicality_session
##
## REML criterion at convergence: -393.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.9777 -0.6671 -0.0126 0.5817 3.2498
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.003352 0.05790
## session_c 0.003105 0.05572 -0.60
## Residual 0.013120 0.11454
## Number of obs: 327, groups: sub_num, 84
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 0.070725 0.008963 81.190001 7.891
## typicality_condition_c 0.017785 0.012677 155.822578 1.403
## session_c 0.009662 0.014095 81.419191 0.686
## typicality_condition_c:session_c -0.040695 0.025353 155.822579 -1.605
## Pr(>|t|)
## (Intercept) 1.21e-11 ***
## typicality_condition_c 0.163
## session_c 0.495
## typicality_condition_c:session_c 0.110
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__ sssn_c
## typclty_cn_ 0.003
## session_c -0.165 -0.003
## typclty__:_ -0.003 0.021 0.003
# Wald confidence intervals for the fixed effects of m_4_4_1
confint(m_4_4_1,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.05315866 0.088291820
## typicality_condition_c -0.00706130 0.042630647
## session_c -0.01796358 0.037287931
## typicality_condition_c:session_c -0.09038658 0.008997312
Below, we summarize participants’ average looking in the typical/atypical conditions by session.
# Baseline-corrected target looking summarized overall, by session x condition,
# with 95% t-based CIs over participant means.
# Fix: na.rm=T replaced with na.rm=TRUE.
overall_corrected_target_looking_by_typicality_session <- avg_corrected_target_looking_by_typicality_session %>%
  group_by(session, condition) %>%
  summarize(
    N = n(),
    corrected_target_looking = mean(average_corrected_target_looking, na.rm = TRUE),
    ci = qt(0.975, N - 1) * sd(average_corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = corrected_target_looking - ci,
    upper_ci = corrected_target_looking + ci
  )
overall_corrected_target_looking_by_typicality_session %>%
  knitr::kable()
| session | condition | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|---|
| 1 | atypical | 84 | 0.0468282 | 0.0288367 | 0.0179915 | 0.0756648 |
| 1 | typical | 83 | 0.0869929 | 0.0313197 | 0.0556733 | 0.1183126 |
| 2 | atypical | 80 | 0.0765215 | 0.0272729 | 0.0492486 | 0.1037944 |
| 2 | typical | 80 | 0.0739589 | 0.0276708 | 0.0462881 | 0.1016297 |
# center session at the trial level (session 1 = -0.5, session 2 = +0.5)
trial_corrected_accuracy <- trial_corrected_accuracy %>%
mutate(
session_c = session - 1.5
)
# Trial-level model: typicality x session, with a random intercept and
# session slope per participant and a random intercept per target category
m_4_4_2 <- lmer(corrected_target_looking ~ 1 + typicality_condition_c*session_c +
(1+session_c |sub_num) +
(1|target_category),
data=trial_corrected_accuracy)
summary(m_4_4_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_c * session_c +
## (1 + session_c | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2258.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1876 -0.6206 -0.0278 0.6618 2.7884
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.0028156 0.05306
## session_c 0.0001582 0.01258 -0.41
## target_category (Intercept) 0.0002485 0.01576
## Residual 0.1185757 0.34435
## Number of obs: 3088, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 0.07294 0.01161 5.27112 6.283
## typicality_condition_c 0.01110 0.01241 2945.59039 0.894
## session_c 0.00293 0.01262 74.14827 0.232
## typicality_condition_c:session_c -0.03065 0.02481 2949.11150 -1.236
## Pr(>|t|)
## (Intercept) 0.00124 **
## typicality_condition_c 0.37123
## session_c 0.81698
## typicality_condition_c:session_c 0.21670
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__ sssn_c
## typclty_cn_ 0.000
## session_c -0.033 -0.006
## typclty__:_ -0.004 -0.020 0.001
# Wald confidence intervals for the fixed effects of m_4_4_2
confint(m_4_4_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sig04 NA NA
## .sigma NA NA
## (Intercept) 0.05018805 0.09569259
## typicality_condition_c -0.01322543 0.03542345
## session_c -0.02179676 0.02765717
## typicality_condition_c:session_c -0.07927601 0.01796975
Images were classified as typical vs. atypical based on an adult norming study. In this analysis, we investigated whether treating typicality as a continuous metric, using the norming ratings as a predictor, would provide us with more power to detect a typicality effect.
# Model: continuous (z-scored) target typicality as a predictor of
# baseline-corrected target looking, with crossed random intercepts
m_5_1_1 <- lmer(corrected_target_looking ~ 1 + target_typicality_z +
(1|sub_num)+
(1|target_category),
data=trial_corrected_accuracy)
summary(m_5_1_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + target_typicality_z + (1 | sub_num) +
## (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2246.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1798 -0.6232 -0.0247 0.6630 2.7694
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0028206 0.05311
## target_category (Intercept) 0.0002682 0.01638
## Residual 0.1184828 0.34421
## Number of obs: 3088, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.304e-02 1.182e-02 5.159e+00 6.18 0.00145 **
## target_typicality_z 1.176e-02 6.354e-03 2.891e+03 1.85 0.06435 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trgt_typcl_ 0.004
# Wald confidence intervals for the fixed effects of m_5_1_1
confint(m_5_1_1,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.0498745657 0.09620693
## target_typicality_z -0.0006958822 0.02421020
Quick plot. Descriptively, it appears that perhaps there is a decrement in looking for items on the extreme end of typicality, and otherwise fairly similar target looking.
# Quick diagnostic plot: trial-level baseline-corrected target looking as a
# function of standardized target typicality, with a smoothed trend line
ggplot(trial_corrected_accuracy,aes(target_typicality_z,corrected_target_looking))+
geom_point(aes(color=condition))+
geom_hline(yintercept=0, linetype="dashed")+
geom_smooth()
# Model: target and distractor typicality (z-scored) as simultaneous
# predictors, with by-participant random slopes for both.
# NOTE(review): the fitted model reports a singular fit (see output below).
m_5_1_2 <- lmer(corrected_target_looking ~ 1 + target_typicality_z + distractor_typicality_z+
(1+target_typicality_z + distractor_typicality_z|sub_num)+
(1|target_category),
data=trial_corrected_accuracy)
summary(m_5_1_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## corrected_target_looking ~ 1 + target_typicality_z + distractor_typicality_z +
## (1 + target_typicality_z + distractor_typicality_z | sub_num) +
## (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2248.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.2103 -0.6176 -0.0250 0.6570 2.8127
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.0028677 0.05355
## target_typicality_z 0.0033167 0.05759 0.27
## distractor_typicality_z 0.0033408 0.05780 -0.14 -0.99
## target_category (Intercept) 0.0002616 0.01617
## Residual 0.1175638 0.34288
## Number of obs: 3088, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.073290 0.011762 5.201755 6.231 0.00135 **
## target_typicality_z 0.006952 0.014313 97.926779 0.486 0.62828
## distractor_typicality_z 0.005153 0.014234 91.456627 0.362 0.71817
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trgt__
## trgt_typcl_ 0.066
## dstrctr_ty_ -0.037 -0.894
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
# Wald confidence intervals for the fixed effects of m_5_1_2.
# Fix: the original called confint(m_5_1_1) a second time here — a
# copy-paste error; this chunk reports on the model fit directly above.
confint(m_5_1_2, method = "Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.0498745657 0.09620693
## target_typicality_z -0.0006958822 0.02421020
Next, we investigate category- (target word) and item-level (target image) variation in proportion target looking.
# Summarize average accuracy within participant (by word alone): corrected,
# critical-window, and baseline-window looking, each with 95% t-based CIs.
# Fix: na.rm=T replaced with na.rm=TRUE.
avg_corrected_target_looking_by_word <- trial_corrected_accuracy %>%
  group_by(sub_num, target_category) %>%
  summarize(
    N = n(),
    mean_age = mean(age),
    mean_age_mo = mean(age_mo),
    average_corrected_target_looking = mean(corrected_target_looking, na.rm = TRUE),
    ci = qt(0.975, N - 1) * sd(corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = average_corrected_target_looking - ci,
    upper_ci = average_corrected_target_looking + ci,
    average_critical_window_looking = mean(mean_target_looking_critical, na.rm = TRUE),
    critical_window_ci = qt(0.975, N - 1) * sd(mean_target_looking_critical, na.rm = TRUE) / sqrt(N),
    critical_window_lower_ci = average_critical_window_looking - critical_window_ci,
    critical_window_upper_ci = average_critical_window_looking + critical_window_ci,
    average_baseline_window_looking = mean(mean_target_looking_baseline, na.rm = TRUE),
    baseline_window_ci = qt(0.975, N - 1) * sd(mean_target_looking_baseline, na.rm = TRUE) / sqrt(N),
    baseline_window_lower_ci = average_baseline_window_looking - baseline_window_ci,
    baseline_window_upper_ci = average_baseline_window_looking + baseline_window_ci
  )
# summarize average accuracy within participant (by word split by typicality);
# per-cell means of corrected, critical-window, and baseline-window looking
avg_corrected_target_looking_by_typicality_word <- trial_corrected_accuracy %>%
group_by(sub_num, condition,target_category) %>%
summarize(N=n(),
mean_age = mean(age),
mean_age_mo = mean(age_mo),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
average_critical_window_looking=mean(mean_target_looking_critical,na.rm=TRUE),
average_baseline_window_looking=mean(mean_target_looking_baseline,na.rm=TRUE))
# Overall summarized looking, by word alone: means of per-participant
# averages with 95% t-based CIs for corrected, critical-window, and
# baseline-window looking.
# Fix: na.rm=T replaced with na.rm=TRUE.
overall_target_looking_by_word <- avg_corrected_target_looking_by_word %>%
  group_by(target_category) %>%
  summarize(
    N = n(),
    corrected_target_looking = mean(average_corrected_target_looking, na.rm = TRUE),
    ci = qt(0.975, N - 1) * sd(average_corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = corrected_target_looking - ci,
    upper_ci = corrected_target_looking + ci,
    target_looking_critical_window = mean(average_critical_window_looking, na.rm = TRUE),
    ci_critical_window = qt(0.975, N - 1) * sd(average_critical_window_looking, na.rm = TRUE) / sqrt(N),
    lower_ci_critical_window = target_looking_critical_window - ci_critical_window,
    upper_ci_critical_window = target_looking_critical_window + ci_critical_window,
    target_looking_baseline_window = mean(average_baseline_window_looking, na.rm = TRUE),
    ci_baseline_window = qt(0.975, N - 1) * sd(average_baseline_window_looking, na.rm = TRUE) / sqrt(N),
    lower_ci_baseline_window = target_looking_baseline_window - ci_baseline_window,
    upper_ci_baseline_window = target_looking_baseline_window + ci_baseline_window
  )
# Overall summarized looking, by word split by typicality condition.
# Fix: na.rm=T replaced with na.rm=TRUE.
overall_target_looking_by_typicality_word <- avg_corrected_target_looking_by_typicality_word %>%
  group_by(condition, target_category) %>%
  summarize(
    N = n(),
    corrected_target_looking = mean(average_corrected_target_looking, na.rm = TRUE),
    ci = qt(0.975, N - 1) * sd(average_corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = corrected_target_looking - ci,
    upper_ci = corrected_target_looking + ci,
    target_looking_critical_window = mean(average_critical_window_looking, na.rm = TRUE),
    ci_critical_window = qt(0.975, N - 1) * sd(average_critical_window_looking, na.rm = TRUE) / sqrt(N),
    lower_ci_critical_window = target_looking_critical_window - ci_critical_window,
    upper_ci_critical_window = target_looking_critical_window + ci_critical_window,
    target_looking_baseline_window = mean(average_baseline_window_looking, na.rm = TRUE),
    ci_baseline_window = qt(0.975, N - 1) * sd(average_baseline_window_looking, na.rm = TRUE) / sqrt(N),
    lower_ci_baseline_window = target_looking_baseline_window - ci_baseline_window,
    upper_ci_baseline_window = target_looking_baseline_window + ci_baseline_window
  )
Overall, participants showed robust learning of all four words, in both typicality conditions.
# Display the by-word summary (corrected looking columns only)
overall_target_looking_by_word %>%
select(target_category:upper_ci) %>%
knitr::kable()
| target_category | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|
| bird | 84 | 0.0828352 | 0.0289568 | 0.0538784 | 0.1117921 |
| cat | 84 | 0.0846898 | 0.0274928 | 0.0571970 | 0.1121825 |
| dog | 84 | 0.0452197 | 0.0253637 | 0.0198560 | 0.0705835 |
| fish | 84 | 0.0645363 | 0.0307689 | 0.0337674 | 0.0953052 |
# Display the by-word x typicality summary (corrected looking columns only)
overall_target_looking_by_typicality_word %>%
select(target_category:upper_ci) %>%
knitr::kable()
| condition | target_category | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|---|
| atypical | bird | 84 | 0.0952772 | 0.0412225 | 0.0540548 | 0.1364997 |
| atypical | cat | 84 | 0.0721099 | 0.0385986 | 0.0335113 | 0.1107084 |
| atypical | dog | 84 | 0.0237760 | 0.0393909 | -0.0156149 | 0.0631669 |
| atypical | fish | 84 | 0.0681816 | 0.0426735 | 0.0255081 | 0.1108551 |
| typical | bird | 84 | 0.0728658 | 0.0360956 | 0.0367702 | 0.1089614 |
| typical | cat | 84 | 0.1039214 | 0.0386892 | 0.0652323 | 0.1426106 |
| typical | dog | 84 | 0.0710628 | 0.0354239 | 0.0356389 | 0.1064867 |
| typical | fish | 84 | 0.0597611 | 0.0415034 | 0.0182577 | 0.1012645 |
# checking the typicality effect for bird
# (paired t-test over participants' by-word condition means)
t.test(
filter(avg_corrected_target_looking_by_typicality_word,condition=="typical"&target_category=="bird")$average_corrected_target_looking,
filter(avg_corrected_target_looking_by_typicality_word,condition=="atypical"&target_category=="bird")$average_corrected_target_looking,
paired=TRUE
)
##
## Paired t-test
##
## data: filter(avg_corrected_target_looking_by_typicality_word, condition == "typical" & target_category == "bird")$average_corrected_target_looking and filter(avg_corrected_target_looking_by_typicality_word, condition == "atypical" & target_category == "bird")$average_corrected_target_looking
## t = -0.91048, df = 83, p-value = 0.3652
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## -0.07136948 0.02654664
## sample estimates:
## mean difference
## -0.02241142
# checking the typicality effect for cat
# (comment fixed: it previously said "bird", but the code tests "cat")
t.test(
filter(avg_corrected_target_looking_by_typicality_word,condition=="typical"&target_category=="cat")$average_corrected_target_looking,
filter(avg_corrected_target_looking_by_typicality_word,condition=="atypical"&target_category=="cat")$average_corrected_target_looking,
paired=TRUE
)
##
## Paired t-test
##
## data: filter(avg_corrected_target_looking_by_typicality_word, condition == "typical" & target_category == "cat")$average_corrected_target_looking and filter(avg_corrected_target_looking_by_typicality_word, condition == "atypical" & target_category == "cat")$average_corrected_target_looking
## t = 1.2173, df = 83, p-value = 0.2269
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## -0.02016634 0.08378945
## sample estimates:
## mean difference
## 0.03181156
# checking the typicality effect for dog
# (paired t-test over participants' by-word condition means)
t.test(
filter(avg_corrected_target_looking_by_typicality_word,condition=="typical"&target_category=="dog")$average_corrected_target_looking,
filter(avg_corrected_target_looking_by_typicality_word,condition=="atypical"&target_category=="dog")$average_corrected_target_looking,
paired=TRUE
)
##
## Paired t-test
##
## data: filter(avg_corrected_target_looking_by_typicality_word, condition == "typical" & target_category == "dog")$average_corrected_target_looking and filter(avg_corrected_target_looking_by_typicality_word, condition == "atypical" & target_category == "dog")$average_corrected_target_looking
## t = 1.7506, df = 83, p-value = 0.08371
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## -0.006439375 0.101012983
## sample estimates:
## mean difference
## 0.0472868
# checking the typicality effect for fish
# (paired t-test over participants' by-word condition means)
t.test(
filter(avg_corrected_target_looking_by_typicality_word,condition=="typical"&target_category=="fish")$average_corrected_target_looking,
filter(avg_corrected_target_looking_by_typicality_word,condition=="atypical"&target_category=="fish")$average_corrected_target_looking,
paired=TRUE
)
##
## Paired t-test
##
## data: filter(avg_corrected_target_looking_by_typicality_word, condition == "typical" & target_category == "fish")$average_corrected_target_looking and filter(avg_corrected_target_looking_by_typicality_word, condition == "atypical" & target_category == "fish")$average_corrected_target_looking
## t = -0.28603, df = 83, p-value = 0.7756
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## -0.06697470 0.05013365
## sample estimates:
## mean difference
## -0.008420526
# Color palette and a reproducible jitter shared by the paths and points
pal <- wes_palette("Rushmore1", n = 5)
set.seed(1)
jitterer <- position_jitter(width = .05, seed = 1)
# Raincloud-style plot of baseline-corrected target looking by typicality
# condition, faceted by target category: half violins per condition,
# light per-participant points/lines, and condition means with 95% CIs.
# Fix: dropped fill=NA from geom_path — fill is not a geom_path parameter
# and triggered an "unknown parameters" warning.
p3 <- ggplot(avg_corrected_target_looking_by_typicality_word, aes(x = condition, y = average_corrected_target_looking, fill = condition)) +
  geom_half_violin(data = filter(avg_corrected_target_looking_by_typicality_word, condition == "atypical"), position = position_nudge(x = -.1, y = 0), width = 1, trim = FALSE, alpha = .8, color = NA, side = "l") +
  geom_half_violin(data = filter(avg_corrected_target_looking_by_typicality_word, condition == "typical"), position = position_nudge(x = .1, y = 0), width = 1, trim = FALSE, alpha = .8, color = NA, side = "r") +
  geom_path(aes(group = sub_num), color = "black", alpha = 0.05, size = 0.75, position = jitterer) +
  geom_point(aes(color = condition, group = sub_num), size = 2.5, alpha = 0.05, position = jitterer) +
  geom_point(data = overall_target_looking_by_typicality_word, aes(y = corrected_target_looking), color = "black", size = 1.8) +
  geom_line(data = overall_target_looking_by_typicality_word, aes(y = corrected_target_looking, group = 1), color = "black", size = 1.5) +
  geom_errorbar(data = overall_target_looking_by_typicality_word, aes(y = corrected_target_looking, ymin = lower_ci, ymax = upper_ci), width = 0, color = "black") +
  #geom_boxplot(outlier.shape = NA, alpha = .5, width = .1, colour = "black")+
  #scale_colour_brewer(palette = "Dark2")+
  #scale_fill_brewer(palette = "Dark2")+
  geom_hline(yintercept = 0, linetype = "dashed") +
  scale_colour_manual(values = pal[c(3, 4)]) +
  scale_fill_manual(values = pal[c(3, 4)]) +
  facet_wrap(. ~ target_category) +
  theme(legend.position = "none") +
  xlab("Typicality Condition") +
  ylab("Baseline-Corrected\nProportion Target Looking") +
  theme(axis.title.x = element_text(face = "bold", size = 20),
        axis.text.x = element_text(size = 16),
        axis.title.y = element_text(face = "bold", size = 20),
        axis.text.y = element_text(size = 16),
        strip.text.x = element_text(size = 16, face = "bold"))
ggsave(here::here("..", "figures", "baseline_corrected_accuracy_by_category.png"), width = 7, height = 6)
# summarize average accuracy within participant, per target image
avg_corrected_target_looking_by_image <- trial_corrected_accuracy %>%
group_by(sub_num, condition,target_category,target_image,target_typicality_z) %>%
summarize(N=n(),
mean_age = mean(age),
mean_age_mo = mean(age_mo),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
average_critical_window_looking=mean(mean_target_looking_critical,na.rm=TRUE),
average_baseline_window_looking=mean(mean_target_looking_baseline,na.rm=TRUE))
# Baseline-corrected target looking summarized overall, per target image,
# then derive a display name and an on-disk image path for plotting.
# Fixes: na.rm=T -> na.rm=TRUE; paste(..., sep="") -> paste0().
overall_target_looking_by_image <- avg_corrected_target_looking_by_image %>%
  group_by(condition, target_category, target_image, target_typicality_z) %>%
  summarize(
    N = n(),
    corrected_target_looking = mean(average_corrected_target_looking, na.rm = TRUE),
    ci = qt(0.975, N - 1) * sd(average_corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = corrected_target_looking - ci,
    upper_ci = corrected_target_looking + ci,
    target_looking_critical_window = mean(average_critical_window_looking, na.rm = TRUE),
    ci_critical_window = qt(0.975, N - 1) * sd(average_critical_window_looking, na.rm = TRUE) / sqrt(N),
    lower_ci_critical_window = target_looking_critical_window - ci_critical_window,
    upper_ci_critical_window = target_looking_critical_window + ci_critical_window,
    target_looking_baseline_window = mean(average_baseline_window_looking, na.rm = TRUE),
    ci_baseline_window = qt(0.975, N - 1) * sd(average_baseline_window_looking, na.rm = TRUE) / sqrt(N),
    lower_ci_baseline_window = target_looking_baseline_window - ci_baseline_window,
    upper_ci_baseline_window = target_looking_baseline_window + ci_baseline_window
  ) %>%
  rename(target_image_name = target_image) %>%
  mutate(
    # strip the size suffix for display labels
    target_image = str_replace(target_image_name, "_600x600", ""),
    target_image_path = here("images", paste0(target_image_name, ".png"))
  )
# Display the item-level summary table (drop the path/name helper columns)
overall_target_looking_by_image %>%
ungroup() %>%
relocate(target_image) %>%
select(-target_image_path,-target_image_name) %>%
knitr::kable()
| target_image | condition | target_category | target_typicality_z | N | corrected_target_looking | ci | lower_ci | upper_ci | target_looking_critical_window | ci_critical_window | lower_ci_critical_window | upper_ci_critical_window | target_looking_baseline_window | ci_baseline_window | lower_ci_baseline_window | upper_ci_baseline_window |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| heron | atypical | bird | -0.2912701 | 82 | 0.1408195 | 0.0662388 | 0.0745807 | 0.2070583 | 0.5547662 | 0.0555783 | 0.4991879 | 0.6103445 | 0.4139467 | 0.0401921 | 0.3737546 | 0.4541388 |
| kingfisher | atypical | bird | -0.1281589 | 79 | 0.0382778 | 0.0710872 | -0.0328093 | 0.1093650 | 0.4856289 | 0.0624170 | 0.4232120 | 0.5480459 | 0.4473511 | 0.0419578 | 0.4053933 | 0.4893090 |
| kookaburra | atypical | bird | -1.0835249 | 78 | 0.1066172 | 0.0567018 | 0.0499154 | 0.1633190 | 0.5541817 | 0.0470623 | 0.5071194 | 0.6012440 | 0.4475645 | 0.0412278 | 0.4063367 | 0.4887924 |
| cornishrex | atypical | cat | -1.1767313 | 80 | 0.0763216 | 0.0675627 | 0.0087589 | 0.1438843 | 0.5409182 | 0.0587443 | 0.4821739 | 0.5996625 | 0.4645966 | 0.0395096 | 0.4250871 | 0.5041062 |
| oriental | atypical | cat | -1.1301281 | 80 | 0.1527616 | 0.0690103 | 0.0837513 | 0.2217719 | 0.6565296 | 0.0531853 | 0.6033443 | 0.7097148 | 0.5037680 | 0.0468078 | 0.4569601 | 0.5505758 |
| sphynx | atypical | cat | -1.8058748 | 83 | 0.0003310 | 0.0680594 | -0.0677284 | 0.0683904 | 0.6153968 | 0.0496319 | 0.5657649 | 0.6650288 | 0.6150658 | 0.0433231 | 0.5717427 | 0.6583890 |
| afghanhound | atypical | dog | -1.0136201 | 79 | 0.0621100 | 0.0636254 | -0.0015154 | 0.1257354 | 0.6036402 | 0.0522234 | 0.5514168 | 0.6558636 | 0.5415302 | 0.0419835 | 0.4995466 | 0.5835137 |
| bassethound | atypical | dog | -0.5708895 | 81 | 0.0432086 | 0.0742969 | -0.0310883 | 0.1175054 | 0.6520520 | 0.0542444 | 0.5978076 | 0.7062964 | 0.6088435 | 0.0478269 | 0.5610165 | 0.6566704 |
| sheepdog | atypical | dog | -1.4563507 | 82 | -0.0268143 | 0.0617432 | -0.0885574 | 0.0349289 | 0.4743607 | 0.0503071 | 0.4240536 | 0.5246678 | 0.5011750 | 0.0415525 | 0.4596225 | 0.5427275 |
| betafish | atypical | fish | -0.5475879 | 83 | 0.0882696 | 0.0645190 | 0.0237506 | 0.1527886 | 0.5323232 | 0.0579734 | 0.4743497 | 0.5902966 | 0.4440536 | 0.0394941 | 0.4045594 | 0.4835477 |
| lionfish | atypical | fish | -0.9670168 | 80 | 0.0029581 | 0.0692744 | -0.0663163 | 0.0722325 | 0.5203641 | 0.0572911 | 0.4630731 | 0.5776552 | 0.5174060 | 0.0401878 | 0.4772183 | 0.5575938 |
| sturgeon | atypical | fish | -0.7573023 | 81 | 0.1372774 | 0.0735684 | 0.0637090 | 0.2108458 | 0.4896405 | 0.0612244 | 0.4284161 | 0.5508649 | 0.3523631 | 0.0476130 | 0.3047501 | 0.3999761 |
| cardinal | typical | bird | 0.6873975 | 83 | 0.0761337 | 0.0577969 | 0.0183368 | 0.1339306 | 0.5527972 | 0.0460688 | 0.5067284 | 0.5988660 | 0.4766635 | 0.0415477 | 0.4351158 | 0.5182111 |
| robin | typical | bird | 0.7340007 | 82 | 0.1123138 | 0.0706687 | 0.0416452 | 0.1829825 | 0.5664190 | 0.0527234 | 0.5136956 | 0.6191424 | 0.4541051 | 0.0422689 | 0.4118363 | 0.4963740 |
| sparrow | typical | bird | 1.0369217 | 83 | 0.0294041 | 0.0489869 | -0.0195828 | 0.0783909 | 0.5079121 | 0.0431239 | 0.4647882 | 0.5510359 | 0.4785080 | 0.0381717 | 0.4403363 | 0.5166796 |
| arabianmau | typical | cat | 0.8272072 | 82 | 0.0719848 | 0.0501193 | 0.0218655 | 0.1221041 | 0.5958545 | 0.0504340 | 0.5454205 | 0.6462885 | 0.5238697 | 0.0351796 | 0.4886901 | 0.5590493 |
| chartreux | typical | cat | 0.6407943 | 81 | 0.1536233 | 0.0723942 | 0.0812291 | 0.2260175 | 0.5942010 | 0.0568159 | 0.5373850 | 0.6510169 | 0.4405777 | 0.0513137 | 0.3892640 | 0.4918914 |
| tabby | typical | cat | 1.3631442 | 82 | 0.0860622 | 0.0732891 | 0.0127732 | 0.1593513 | 0.6226850 | 0.0616445 | 0.5610405 | 0.6843294 | 0.5366227 | 0.0420258 | 0.4945969 | 0.5786485 |
| beagle | typical | dog | 1.1534297 | 79 | 0.1038370 | 0.0651526 | 0.0386844 | 0.1689896 | 0.6290305 | 0.0515945 | 0.5774359 | 0.6806250 | 0.5251935 | 0.0392946 | 0.4858989 | 0.5644881 |
| germanshepherd | typical | dog | 1.1767313 | 81 | 0.0822380 | 0.0647282 | 0.0175098 | 0.1469662 | 0.5672133 | 0.0478993 | 0.5193139 | 0.6151126 | 0.4849753 | 0.0410271 | 0.4439483 | 0.5260024 |
| goldenretriever | typical | dog | 0.9670168 | 83 | 0.0259201 | 0.0582506 | -0.0323305 | 0.0841707 | 0.5466143 | 0.0462486 | 0.5003657 | 0.5928629 | 0.5206942 | 0.0420201 | 0.4786741 | 0.5627143 |
| bass | typical | fish | 0.7806040 | 81 | 0.0633932 | 0.0683873 | -0.0049941 | 0.1317805 | 0.5240357 | 0.0597066 | 0.4643291 | 0.5837422 | 0.4606424 | 0.0378064 | 0.4228361 | 0.4984488 |
| bluegill | typical | fish | 0.8039056 | 81 | 0.0721545 | 0.0641440 | 0.0080105 | 0.1362984 | 0.5666383 | 0.0535123 | 0.5131260 | 0.6201506 | 0.4944838 | 0.0430493 | 0.4514345 | 0.5375332 |
| clownfish | typical | fish | 0.7573023 | 80 | 0.0423539 | 0.0621051 | -0.0197512 | 0.1044590 | 0.5335050 | 0.0493777 | 0.4841273 | 0.5828827 | 0.4911511 | 0.0449664 | 0.4461847 | 0.5361175 |
# Plot item-level baseline-corrected looking, ordered by mean looking, with
# each stimulus image drawn slightly above its point and 95% CI error bars
ggplot(overall_target_looking_by_image,aes(reorder(target_image,corrected_target_looking),corrected_target_looking))+
geom_image(aes(y=corrected_target_looking+0.1,image=target_image_path),size=.1)+
geom_hline(yintercept=0,linetype="dashed")+
geom_errorbar(aes(ymin=lower_ci,ymax=upper_ci,color=condition),width=0)+
geom_point(aes(color=condition),size=3)+
xlab("Target Image")+
ylab("Baseline-Corrected\nProportion Target Looking")+
theme(axis.title.x = element_text(face="bold", size=20),
axis.text.x = element_text(size=16,angle=90,vjust=0.5),
axis.title.y = element_text(face="bold", size=20),
axis.text.y = element_text(size=16),
strip.text.x = element_text(size = 16,face="bold"),
legend.position=c(0.8,0.15)
)+
scale_color_manual(values=pal[c(3,4)])
ggsave(here::here("..","figures","baseline_corrected_accuracy_by_word.png"),width=9,height=6)
# Reshape the trial data for the Peekbank ICC helper (get_icc, sourced from
# compute_iccs.R), which expects administration_id / trial_id / target_label.
# Fix: remove=F replaced with remove = FALSE (F is a reassignable alias).
df_for_icc <- trial_corrected_accuracy %>%
  # some light renaming to use Peekbank icc function
  mutate(
    administration_id = sub_num
  ) %>%
  # build a unique trial id while keeping the source columns
  unite("trial_id", administration_id, trial_number, remove = FALSE) %>%
  mutate(
    target_label = target_image
  )
# Intraclass correlation coefficients (consistency type), computed with the
# get_icc() helper sourced from compute_iccs.R, for three trial-level
# measures: corrected target looking, baseline looking, critical-window looking.

# ICCs across participants (administrations)
icc_participants <- get_icc(
  df_for_icc,
  object = "administration", column = "corrected_target_looking", type_icc = "consistency"
)
icc_participants_baseline <- get_icc(
  df_for_icc,
  object = "administration", column = "mean_target_looking_baseline", type_icc = "consistency"
)
icc_participants_critical <- get_icc(
  df_for_icc,
  object = "administration", column = "mean_target_looking_critical", type_icc = "consistency"
)

# ICCs across stimuli
icc_stimuli <- get_icc(
  df_for_icc,
  object = "stimulus", column = "corrected_target_looking", type_icc = "consistency"
)
icc_stimuli_baseline <- get_icc(
  df_for_icc,
  object = "stimulus", column = "mean_target_looking_baseline", type_icc = "consistency"
)
icc_stimuli_critical <- get_icc(
  df_for_icc,
  object = "stimulus", column = "mean_target_looking_critical", type_icc = "consistency"
)
Participants were required to contribute at least 24 valid trials in order to be included in the final sample. Here, we explore the impact of this criterion on whether we observe a typicality effect, by estimating the typicality effect for a range of looser and stricter trial-based exclusion criteria.
# Candidate thresholds for the minimum number of valid trials required
min_trials_required_list <- c(4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48)

# Participant-by-condition summary of corrected target looking, widened to
# one column per condition, plus each participant's total valid trial count.
subj_typ_data <- trial_corrected_accuracy_all %>%
  # trial-level inclusion criteria (same as the main analysis)
  filter(
    exclude_frame_rate == 0,
    exclude_technical_issue == 0,
    useable_window == 1,
    age_exclusion == 0
  ) %>%
  group_by(sub_num, condition) %>%
  summarize(
    N = n(),
    average_corrected_target_looking = mean(corrected_target_looking, na.rm = TRUE)
  ) %>%
  ungroup() %>%
  group_by(sub_num) %>%
  mutate(total_trials = sum(N, na.rm = TRUE)) %>%
  pivot_wider(
    names_from = condition,
    values_from = c(N, average_corrected_target_looking)
  ) %>%
  ungroup()
#add and apply min trial exclusion criteria,
#creating nested dataframes for each minimum trial exclusion criterion
#then test the typicality effect within each dataset and store the results
typ_effect_min_valid_trials <- expand_grid(
  subj_typ_data,
  min_valid_trials_req = min_trials_required_list
) %>%
  #apply min trial exclusion criterion
  group_by(min_valid_trials_req) %>%
  filter(total_trials >= min_valid_trials_req) %>%
  # N = number of participants retained under this criterion
  # (n() is the idiomatic dplyr group-size function)
  mutate(
    N = n()
  ) %>%
  group_by(min_valid_trials_req, N) %>%
  #nest the data, then run a paired t-test (typical vs. atypical) per subset
  nest() %>%
  mutate(
    t_test = map(
      data,
      ~ t.test(
        .x$average_corrected_target_looking_typical,
        .x$average_corrected_target_looking_atypical,
        paired = TRUE
      )
    ),
    result = map(t_test, tidy)
  ) %>%
  unnest(result)
# typ_effect_min_valid_trials %>%
#   knitr::kable()

## Plot the estimated typicality effect as a function of the minimum-trial
## criterion; each point is labeled with the number of included participants (N).
ggplot(typ_effect_min_valid_trials, aes(x = min_valid_trials_req, y = estimate)) +
  geom_errorbar(aes(ymin = conf.low, ymax = conf.high), width = 0) +
  geom_point(size = 3) +
  geom_hline(yintercept = 0, linetype = "dashed") +
  geom_text(aes(label = N), nudge_y = 0.07) +
  ylab("Estimated Participant-Level Typicality Effect") +
  xlab("Minimum Number of Trials Required for Inclusion")
# Save the most recently displayed plot
ggsave(here::here("..", "figures", "typicality_effect_min_trials_for_inclusion.png"), width = 9, height = 6)
In our Stage 1 manuscript, we preregistered a sample of N=80. Our current results report data from all participants who contributed valid data, leading to a final N of 84 participants. Here, we remove the data from the final 4 participants who contributed data on Lookit and re-run the main analyses from Aim 1 (1.1 and 1.2), to ensure that the decision to include all participants did not change the outcome of the study.
CAT_343, CAT_344, CAT_345, and CAT_346 were the final 4 participants contributing data on Lookit.
# The final 4 participants who contributed data on Lookit; removing them
# reduces the sample to the preregistered N = 80 for the sensitivity check.
subj_to_remove <- c("CAT_343", "CAT_344", "CAT_345", "CAT_346")

trial_corrected_accuracy_red <- trial_corrected_accuracy_all %>%
  filter(
    exclude_participant == 0,
    trial_exclusion == 0,
    !(sub_num %in% subj_to_remove)
  )
# summarize by-participant and typicality
# (spell out na.rm = TRUE: T is a reassignable variable, not a keyword)
avg_corrected_target_looking_by_typicality_red <- trial_corrected_accuracy_red %>%
  group_by(sub_num, condition) %>%
  summarize(
    N = n(),
    mean_age = mean(age),
    mean_age_mo = mean(age_mo),
    average_corrected_target_looking = mean(corrected_target_looking, na.rm = TRUE),
    se = sd(corrected_target_looking, na.rm = TRUE) / sqrt(N),
    # 95% CI half-width from the t distribution with N - 1 df
    ci = qt(0.975, N - 1) * sd(corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = average_corrected_target_looking - ci,
    upper_ci = average_corrected_target_looking + ci,
    lower_se = average_corrected_target_looking - se,
    upper_se = average_corrected_target_looking + se
  )
#baseline-corrected target looking summarized overall (one row per condition)
# (spell out na.rm = TRUE: T is a reassignable variable, not a keyword)
overall_corrected_target_looking_by_typicality_red <- avg_corrected_target_looking_by_typicality_red %>%
  group_by(condition) %>%
  summarize(
    N = n(),
    corrected_target_looking = mean(average_corrected_target_looking, na.rm = TRUE),
    # 95% CI half-width from the t distribution with N - 1 df
    ci = qt(0.975, N - 1) * sd(average_corrected_target_looking, na.rm = TRUE) / sqrt(N),
    lower_ci = corrected_target_looking - ci,
    upper_ci = corrected_target_looking + ci
  )
overall_corrected_target_looking_by_typicality_red %>%
  knitr::kable()
| condition | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|
| atypical | 80 | 0.0605700 | 0.0194323 | 0.0411377 | 0.0800024 |
| typical | 80 | 0.0766292 | 0.0216437 | 0.0549855 | 0.0982729 |
# Numeric contrast codings for typicality condition:
#   *_c:    centered (+0.5 / -0.5)  -> intercept = grand mean
#   *_typ:  typical coded 0         -> intercept = typical condition
#   *_atyp: atypical coded 0        -> intercept = atypical condition
# Any other condition value falls through to NA.
avg_corrected_target_looking_by_typicality_red <- avg_corrected_target_looking_by_typicality_red %>%
  mutate(
    typicality_condition_c = case_when(
      condition == "typical" ~ 0.5,
      condition == "atypical" ~ -0.5,
      TRUE ~ NA_real_
    ),
    typicality_condition_typ = case_when(
      condition == "typical" ~ 0,
      condition == "atypical" ~ -1,
      TRUE ~ NA_real_
    ),
    typicality_condition_atyp = case_when(
      condition == "typical" ~ 1,
      condition == "atypical" ~ 0,
      TRUE ~ NA_real_
    )
  )
# Aim 1.1 (reduced sample): participant-level mixed model of baseline-corrected
# target looking with the centered typicality contrast and by-participant
# random slopes. The check.nobs.* checks are disabled because each participant
# contributes only 2 rows (one per condition), equal to the random-effect levels.
m_1_1_red <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_c + (1+ typicality_condition_c|sub_num),data=avg_corrected_target_looking_by_typicality_red,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_red)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_c +
## (1 + typicality_condition_c | sub_num)
## Data: avg_corrected_target_looking_by_typicality_red
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -301.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.16425 -0.36910 0.07516 0.41158 1.85866
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.003331 0.05771
## typicality_condition_c 0.004636 0.06809 0.23
## Residual 0.004052 0.06366
## Number of obs: 160, groups: sub_num, 80
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.068600 0.008183 79.000504 8.383 1.56e-12 ***
## typicality_condition_c 0.016059 0.012620 78.999366 1.273 0.207
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.111
confint(m_1_1_red,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.052561283 0.08463797
## typicality_condition_c -0.008674928 0.04079339
There remained no significant effect of typicality in the participant-level analysis.
## Typical word recognition
# recentering the model on the typical condition to make the intercept interpretable
# (typical coded 0, so the intercept estimates recognition for typical exemplars;
# same data and random-effects structure as m_1_1_red)
m_1_1_3_typ_red <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_typ + (1+ typicality_condition_typ|sub_num),data=avg_corrected_target_looking_by_typicality_red,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_typ_red)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_typ +
## (1 + typicality_condition_typ | sub_num)
## Data: avg_corrected_target_looking_by_typicality_red
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -301.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.3642 -0.4032 0.0821 0.4496 2.0304
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.004623 0.06799
## typicality_condition_typ 0.003069 0.05540 0.65
## Residual 0.004836 0.06954
## Number of obs: 160, groups: sub_num, 80
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.07663 0.01087 78.99978 7.047 6.07e-10 ***
## typicality_condition_typ 0.01606 0.01262 79.00009 1.273 0.207
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.664
## optimizer (nloptwrap) convergence code: 0 (OK)
## unable to evaluate scaled gradient
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
confint(m_1_1_3_typ_red,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.055317025 0.09794146
## typicality_condition_typ -0.008674805 0.04079327
#effect size: one-sample Cohen's d for the typical condition
#(participant-level corrected target looking tested against zero)
cohens_d(avg_corrected_target_looking_by_typicality_red$average_corrected_target_looking[avg_corrected_target_looking_by_typicality_red$condition=="typical"])
## Cohen's d | 95% CI
## ------------------------
## 0.79 | [0.53, 1.04]
## Atypical word recognition
# recentering the model on the atypical condition to make the intercept interpretable
# (atypical coded 0, so the intercept estimates recognition for atypical exemplars;
# same data and random-effects structure as m_1_1_red)
m_1_1_3_atyp_red <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_atyp + (1+ typicality_condition_atyp|sub_num),data=avg_corrected_target_looking_by_typicality_red,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_atyp_red)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_atyp +
## (1 + typicality_condition_atyp | sub_num)
## Data: avg_corrected_target_looking_by_typicality_red
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -301.6
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.26462 -0.38622 0.07864 0.43066 1.94491
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.003188 0.05646
## typicality_condition_atyp 0.003866 0.06218 -0.29
## Residual 0.004437 0.06661
## Number of obs: 160, groups: sub_num, 80
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.060570 0.009763 78.999551 6.204 2.37e-08 ***
## typicality_condition_atyp 0.016059 0.012620 78.999798 1.273 0.207
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ -0.553
## optimizer (nloptwrap) convergence code: 0 (OK)
## unable to evaluate scaled gradient
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
confint(m_1_1_3_atyp_red,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.041435242 0.07970478
## typicality_condition_atyp -0.008674823 0.04079328
#effect size: one-sample Cohen's d for the atypical condition
#(participant-level corrected target looking tested against zero)
cohens_d(avg_corrected_target_looking_by_typicality_red$average_corrected_target_looking[avg_corrected_target_looking_by_typicality_red$condition=="atypical"])
## Cohen's d | 95% CI
## ------------------------
## 0.69 | [0.45, 0.94]
Infants recognized both typical and atypical exemplars, with similar effect sizes.
# Numeric contrast codings for typicality condition at the trial level:
#   *_c:    centered (+0.5 / -0.5)  -> intercept = grand mean
#   *_typ:  typical coded 0         -> intercept = typical condition
#   *_atyp: atypical coded 0        -> intercept = atypical condition
# Any other condition value falls through to NA.
trial_corrected_accuracy_red <- trial_corrected_accuracy_red %>%
  mutate(
    typicality_condition_c = case_when(
      condition == "typical" ~ 0.5,
      condition == "atypical" ~ -0.5,
      TRUE ~ NA_real_
    ),
    typicality_condition_typ = case_when(
      condition == "typical" ~ 0,
      condition == "atypical" ~ -1,
      TRUE ~ NA_real_
    ),
    typicality_condition_atyp = case_when(
      condition == "typical" ~ 1,
      condition == "atypical" ~ 0,
      TRUE ~ NA_real_
    )
  )
#model with typicality random intercept has singular fit but yields basically identical results
# Aim 1.2 (reduced sample): trial-level model of baseline-corrected target
# looking with random intercepts for participant and target category.
m_1_2_red <- lmer(corrected_target_looking ~ 1 + typicality_condition_c +
(1 | sub_num) +
(1|target_category),
data=trial_corrected_accuracy_red)
summary(m_1_2_red)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_c + (1 |
## sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy_red
##
## REML criterion at convergence: 2150.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.13603 -0.61726 -0.03279 0.66064 2.75427
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0022297 0.04722
## target_category (Intercept) 0.0001792 0.01339
## Residual 0.1197725 0.34608
## Number of obs: 2926, groups: sub_num, 80; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.963e-02 1.069e-02 5.172e+00 6.514 0.00112 **
## typicality_condition_c 1.275e-02 1.281e-02 2.859e+03 0.996 0.31955
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ -0.001
confint(m_1_2_red,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.04867897 0.09057927
## typicality_condition_c -0.01235235 0.03785493
The trial-level model also yields no significant typicality effect.
sessionInfo()
## R version 4.2.2 (2022-10-31)
## Platform: aarch64-apple-darwin20 (64-bit)
## Running under: macOS 14.2.1
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/lib/libRlapack.dylib
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] patchwork_1.1.2 agreement_0.0.0.9003 broom_1.0.4
## [4] sessioninfo_1.2.2 ggimage_0.3.3 effectsize_0.8.6
## [7] papaja_0.1.1 tinylabels_0.2.3 broom.mixed_0.2.9.4
## [10] JWileymisc_1.4.1 multilevelTools_0.1.1 car_3.1-1
## [13] carData_3.0-5 gghalves_0.1.4 wesanderson_0.3.6
## [16] TOSTER_0.7.1 lmerTest_3.1-3 lme4_1.1-31
## [19] Matrix_1.5-1 readxl_1.4.3 here_1.0.1
## [22] cowplot_1.1.1 janitor_2.2.0 lubridate_1.9.3
## [25] forcats_1.0.0 stringr_1.5.0 dplyr_1.1.2
## [28] purrr_1.0.1 readr_2.1.4 tidyr_1.3.0
## [31] tibble_3.2.1 ggplot2_3.4.4 tidyverse_2.0.0
##
## loaded via a namespace (and not attached):
## [1] utf8_1.2.2 rms_6.7-1 tidyselect_1.2.0
## [4] htmlwidgets_1.6.1 grid_4.2.2 munsell_0.5.0
## [7] ragg_1.2.5 codetools_0.2-18 future_1.31.0
## [10] withr_2.5.0 colorspace_2.0-3 fst_0.9.8
## [13] highr_0.10 knitr_1.41 rstudioapi_0.14
## [16] stats4_4.2.2 robustbase_0.99-0 ggsignif_0.6.4
## [19] listenv_0.9.0 labeling_0.4.2 emmeans_1.8.4-1
## [22] mnormt_2.1.1 bit64_4.0.5 farver_2.1.1
## [25] datawizard_0.9.0 rprojroot_2.0.3 coda_0.19-4
## [28] parallelly_1.34.0 vctrs_0.6.4 generics_0.1.3
## [31] TH.data_1.1-2 xfun_0.36 ggthemes_4.2.4
## [34] timechange_0.1.1 R6_2.5.1 VGAM_1.1-9
## [37] cachem_1.0.6 gridGraphics_0.5-1 assertthat_0.2.1
## [40] scales_1.2.1 vroom_1.6.0 multcomp_1.4-25
## [43] nnet_7.3-18 gtable_0.3.1 multcompView_0.1-9
## [46] globals_0.16.2 sandwich_3.0-2 rlang_1.1.2
## [49] MatrixModels_0.5-1 systemfonts_1.0.4 splines_4.2.2
## [52] rstatix_0.7.2 checkmate_2.1.0 yaml_2.3.6
## [55] abind_1.4-5 backports_1.4.1 Hmisc_5.1-1
## [58] tools_4.2.2 psych_2.3.3 lavaan_0.6-16
## [61] ggplotify_0.1.2 ellipsis_0.3.2 jquerylib_0.1.4
## [64] extraoperators_0.1.1 Rcpp_1.0.9 base64enc_0.1-3
## [67] ggpubr_0.6.0 rpart_4.1.19 zoo_1.8-11
## [70] cluster_2.1.4 fs_1.5.2 furrr_0.3.1
## [73] magrittr_2.0.3 data.table_1.14.8 magick_2.7.3
## [76] ggdist_3.3.0 SparseM_1.81 mvtnorm_1.1-3
## [79] mitml_0.4-5 hms_1.1.2 evaluate_0.19
## [82] xtable_1.8-4 gridExtra_2.3 shape_1.4.6
## [85] compiler_4.2.2 mice_3.16.0 crayon_1.5.2
## [88] minqa_1.2.5 htmltools_0.5.4 ggfun_0.1.3
## [91] mgcv_1.9-0 tzdb_0.3.0 Formula_1.2-4
## [94] MASS_7.3-58.1 boot_1.3-28 cli_3.6.1
## [97] quadprog_1.5-8 parallel_4.2.2 insight_0.19.7
## [100] pan_1.9 pkgconfig_2.0.3 numDeriv_2016.8-1.1
## [103] foreign_0.8-83 foreach_1.5.2 pbivnorm_0.6.0
## [106] bslib_0.4.2 estimability_1.4.1 snakecase_0.11.0
## [109] yulab.utils_0.1.0 distributional_0.3.2 digest_0.6.31
## [112] parameters_0.21.3 rmarkdown_2.19 cellranger_1.1.0
## [115] htmlTable_2.4.1 quantreg_5.94 jomo_2.7-6
## [118] nloptr_2.0.3 lifecycle_1.0.3 nlme_3.1-160
## [121] jsonlite_1.8.4 fansi_1.0.3 pillar_1.9.0
## [124] lattice_0.21-9 fstcore_0.9.14 fastmap_1.1.0
## [127] DEoptimR_1.1-2 survival_3.4-0 glue_1.6.2
## [130] bayestestR_0.13.1 iterators_1.0.14 glmnet_4.1-8
## [133] bit_4.0.5 stringi_1.7.8 sass_0.4.4
## [136] textshaping_0.3.6 polspline_1.1.22 memoise_2.0.1